HybridVerifier

Contains more than two JUnit-based stereotypes


Class: TestFuseDFS

UtilityVerifier NullVerifier HybridVerifier 
/**
 * Test concurrent creation and access of the mount: ten threads each
 * create a directory, write/read/delete ten files inside it, then remove
 * the directory. Any IOException raised in a worker is recorded and
 * fails the test after all threads are joined.
 */
@Test
public void testMultipleThreads() throws IOException {
  // Parameterized types instead of raw ArrayList/AtomicReference.
  ArrayList<Thread> threads = new ArrayList<Thread>();
  final AtomicReference<String> errorMessage = new AtomicReference<String>();
  for (int i = 0; i < 10; i++) {
    Thread t = new Thread() {
      public void run() {
        try {
          File d = new File(mountPoint, "dir" + getId());
          execWaitRet("mkdir " + d.getAbsolutePath());
          for (int j = 0; j < 10; j++) {
            File f = new File(d, "file" + j);
            final String contents = "thread " + getId() + " " + j;
            createFile(f, contents);
          }
          for (int j = 0; j < 10; j++) {
            File f = new File(d, "file" + j);
            execWaitRet("cat " + f.getAbsolutePath());
            execWaitRet("rm " + f.getAbsolutePath());
          }
          execWaitRet("rmdir " + d.getAbsolutePath());
        } catch (IOException ie) {
          // Record the failure; the main thread asserts on it after join.
          errorMessage.set(String.format("Exception %s",
              StringUtils.stringifyException(ie)));
        }
      }
    };
    t.start();
    threads.add(t);
  }
  for (Thread t : threads) {
    try {
      t.join();
    } catch (InterruptedException ie) {
      fail("Thread interrupted: " + ie.getMessage());
    }
  }
  // The recorded message doubles as the assertion text so the root cause
  // is visible when the test fails.
  assertNull(errorMessage.get(), errorMessage.get());
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test random access to a file through the fuse-dfs mount.
 *
 * The first write seeks to EOF (an append); unlike the overwrite case
 * below there is no fail() after it, so a successful append is tolerated
 * and only the error message is checked when it does throw.
 */
@Test
public void testRandomAccess() throws IOException {
  final String contents = "hello world";
  File f = new File(mountPoint, "file1");
  createFile(f, contents);
  RandomAccessFile raf = new RandomAccessFile(f, "rw");
  raf.seek(f.length());
  try {
    raf.write('b');
    // NOTE(review): no fail() here, unlike the overwrite case below —
    // presumably a write at EOF is allowed to succeed; confirm intent.
  } catch (IOException e) {
    assertEquals("Operation not supported", e.getMessage());
  } finally {
    raf.close();
  }
  // Overwriting existing bytes must always fail.
  raf = new RandomAccessFile(f, "rw");
  raf.seek(0);
  try {
    raf.write('b');
    fail("Over-wrote existing bytes");
  } catch (IOException e) {
    assertEquals("Invalid argument", e.getMessage());
  } finally {
    raf.close();
  }
  execAssertSucceeds("rm " + f.getAbsolutePath());
}

Class: org.apache.hadoop.TestRefreshCallQueue

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that -refreshCallQueue swaps the NameNode call queue from the
 * mock implementation back to the default without constructing another
 * mock queue instance.
 */
@Test
public void testRefresh() throws Exception {
  assertTrue("Mock queue should have been constructed",
      mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  int constructionsBefore = mockQueueConstructions;

  // Trigger the refresh through the admin CLI entry point.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] argv = {"-refreshCallQueue"};
  int rc = dfsAdmin.run(argv);
  assertEquals("DFSAdmin should return 0", 0, rc);

  assertEquals("Mock queue should have no additional constructions",
      constructionsBefore, mockQueueConstructions);
  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue",
        canPutInMockQueue());
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}

BranchVerifier TestInitializer UtilityVerifier HybridVerifier 
/**
 * Resets the mock-queue counters and starts a MiniDFSCluster on a random
 * ephemeral NameNode port, retrying up to five times on BindException.
 */
@Before
public void setUp() throws Exception {
  mockQueueConstructions = 0;
  mockQueuePuts = 0;
  int portRetries = 5;
  int nnPort;
  for (; portRetries > 0; --portRetries) {
    // Pick a random port in [30000, 60000) and wire the per-port
    // call-queue config key to the mock implementation.
    nnPort = 30000 + rand.nextInt(30000);
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey, MockCallQueue.class,
        BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");
    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort)
          .build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Port was taken; loop and try another random port.
    }
  }
  if (portRetries == 0) {
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}

Class: org.apache.hadoop.cli.TestCacheAdminCLI

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Starts a 3-datanode MiniDFSCluster with the HDFS policy provider and
 * replication factor 1, and records the namenode URI, user name, and
 * file system for the cache-admin CLI tests.
 */
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}

Class: org.apache.hadoop.cli.TestCryptoAdminCLI

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Starts a single-datanode MiniDFSCluster backed by a JavaKeyStore key
 * provider in a fresh temp directory, then creates the test key "mykey"
 * for the crypto-admin CLI tests.
 */
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Unique temp dir so concurrent test runs don't share a key store.
  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}

Class: org.apache.hadoop.cli.TestHDFSCLI

APIUtilityVerifier TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Starts an 8-datanode MiniDFSCluster spread over four racks with fixed
 * host names for the HDFS CLI conformance tests.
 */
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Topology: 2 nodes on rack1, 3 on rack2, 1 on rack3, 2 on rack4.
  String[] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"};
  String[] hosts = {"host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"};
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
      .racks(racks).hosts(hosts).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}

Class: org.apache.hadoop.cli.TestXAttrCLI

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Starts a single-datanode MiniDFSCluster with extended attributes
 * (xattrs) enabled for the xattr CLI tests.
 */
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}

Class: org.apache.hadoop.conf.TestConfServlet

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Writing a response with an unknown format must throw BadFormatException
 * and leave the output writer untouched.
 */
@Test
public void testBadFormat() throws Exception {
  StringWriter writer = new StringWriter();
  try {
    ConfServlet.writeResponse(getTestConf(), writer, "not a format");
    fail("writeResponse with bad format didn't throw!");
  } catch (ConfServlet.BadFormatException ignored) {
    // Expected: the bad format is rejected before anything is written.
  }
  assertEquals("", writer.toString());
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Serializes the test configuration as XML and verifies that TEST_KEY
 * appears as a &lt;name&gt; element whose sibling &lt;value&gt; holds TEST_VAL.
 */
@Test
public void testWriteXml() throws Exception {
  StringWriter writer = new StringWriter();
  ConfServlet.writeResponse(getTestConf(), writer, "xml");
  String xml = writer.toString();

  // Parse the emitted XML back into a DOM tree.
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document doc = parser.parse(new InputSource(new StringReader(xml)));

  NodeList names = doc.getElementsByTagName("name");
  boolean found = false;
  for (int idx = 0; idx < names.getLength(); idx++) {
    Node node = names.item(idx);
    String key = node.getTextContent();
    System.err.println("xml key: " + key);
    if (TEST_KEY.equals(key)) {
      found = true;
      Element property = (Element) node.getParentNode();
      String value = property.getElementsByTagName("value")
          .item(0).getTextContent();
      assertEquals(TEST_VAL, value);
    }
  }
  assertTrue(found);
}

Class: org.apache.hadoop.conf.TestConfigurationDeprecation

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A deprecated key and its replacement must stay in sync through set(),
 * and iteration must surface the regular, deprecated, and new keys with
 * the latest value.
 */
@Test
public void testIteratorWithDeprecatedKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK"});
  conf.set("k", "v");
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK"));

  // Setting the new key must be visible through the deprecated one too.
  conf.set("nK", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK"));

  boolean sawRegular = false;
  boolean sawDeprecated = false;
  boolean sawNew = false;
  // Keys are distinct, so an else-if chain is equivalent to the
  // independent ifs.
  for (Map.Entry entry : conf) {
    Object key = entry.getKey();
    if (key.equals("k")) {
      assertEquals("v", entry.getValue());
      sawRegular = true;
    } else if (key.equals("dK")) {
      assertEquals("VV", entry.getValue());
      sawDeprecated = true;
    } else if (key.equals("nK")) {
      assertEquals("VV", entry.getValue());
      sawNew = true;
    }
  }
  assertTrue("regular Key not found", sawRegular);
  assertTrue("deprecated Key not found", sawDeprecated);
  assertTrue("new Key not found", sawNew);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * This test is to ensure the correctness of loading of keys with respect to
 * being marked as final and that are related to deprecation.
 *
 * Relies on addDeprecationToConfiguration() wiring deprecations among the
 * keys A..J; the exact mapping is defined elsewhere in this class.
 * @throws IOException
 */
@Test
public void testDeprecationForFinalParameters() throws IOException {
  addDeprecationToConfiguration();
  // First resource: A and H are final; J is final and empty.
  out = new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("A", "a", true);
  appendProperty("D", "d");
  appendProperty("E", "e");
  appendProperty("H", "h", true);
  appendProperty("J", "", true);
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("d", conf.get("C"));
  assertEquals("d", conf.get("D"));
  assertEquals("e", conf.get("E"));
  assertEquals("e", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));
  // Second resource sets the deprecated/new counterparts; final markers
  // from the first resource must block some of these overrides.
  out = new BufferedWriter(new FileWriter(CONFIG2));
  startConfig();
  appendProperty("B", "b");
  appendProperty("C", "c", true);
  appendProperty("F", "f", true);
  appendProperty("G", "g");
  appendProperty("I", "i");
  endConfig();
  Path fileResource1 = new Path(CONFIG2);
  conf.addResource(fileResource1);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("c", conf.get("C"));
  assertEquals("c", conf.get("D"));
  assertEquals("f", conf.get("E"));
  assertEquals("f", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));
  // NOTE(review): CONFIG3 is written below, but CONFIG (not CONFIG3) is
  // re-added as the resource afterwards, so CONFIG3 is never loaded.
  // Possibly intentional (re-adding a seen resource must not change
  // values), but confirm against the original intent.
  out = new BufferedWriter(new FileWriter(CONFIG3));
  startConfig();
  appendProperty("A", "a1");
  appendProperty("B", "b1");
  appendProperty("C", "c1");
  appendProperty("D", "d1");
  appendProperty("E", "e1");
  appendProperty("F", "f1");
  appendProperty("G", "g1");
  appendProperty("H", "h1");
  appendProperty("I", "i1");
  appendProperty("J", "j1");
  endConfig();
  fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("c", conf.get("C"));
  assertEquals("c", conf.get("D"));
  assertEquals("f", conf.get("E"));
  assertEquals("f", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * unset() on either a deprecated key or its replacement must clear both
 * views of the value.
 */
@Test
public void testUnsetWithDeprecatedKeys() {
  Configuration config = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK"});

  config.set("nK", "VV");
  assertEquals("VV", config.get("dK"));
  assertEquals("VV", config.get("nK"));

  // Unsetting via the deprecated name clears both.
  config.unset("dK");
  assertNull(config.get("dK"));
  assertNull(config.get("nK"));

  config.set("nK", "VV");
  assertEquals("VV", config.get("dK"));
  assertEquals("VV", config.get("nK"));

  // Unsetting via the new name clears both as well.
  config.unset("nK");
  assertNull(config.get("dK"));
  assertNull(config.get("nK"));
}

Class: org.apache.hadoop.conf.TestDeprecatedKeys

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A key deprecated in favour of two new keys must keep all three names in
 * sync through set(), and iteration must surface each name with the
 * latest value.
 */
@Test
public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
  conf.set("k", "v");
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK1"));
  assertEquals("V", conf.get("nK2"));

  // Updating either new key propagates to the deprecated key and its peer.
  conf.set("nK1", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK1"));
  assertEquals("VV", conf.get("nK2"));
  conf.set("nK2", "VVV");
  assertEquals("VVV", conf.get("dK"));
  assertEquals("VVV", conf.get("nK2"));
  assertEquals("VVV", conf.get("nK1"));

  boolean sawRegular = false;
  boolean sawDeprecated = false;
  boolean sawNew1 = false;
  boolean sawNew2 = false;
  // Keys are distinct, so an else-if chain is equivalent.
  for (Map.Entry entry : conf) {
    Object key = entry.getKey();
    if (key.equals("k")) {
      assertEquals("v", entry.getValue());
      sawRegular = true;
    } else if (key.equals("dK")) {
      assertEquals("VVV", entry.getValue());
      sawDeprecated = true;
    } else if (key.equals("nK1")) {
      assertEquals("VVV", entry.getValue());
      sawNew1 = true;
    } else if (key.equals("nK2")) {
      assertEquals("VVV", entry.getValue());
      sawNew2 = true;
    }
  }
  assertTrue("regular Key not found", sawRegular);
  assertTrue("deprecated Key not found", sawDeprecated);
  assertTrue("new Key 1 not found", sawNew1);
  assertTrue("new Key 2 not found", sawNew2);
}

Class: org.apache.hadoop.conf.TestJobConf

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * getProfileParams() must default to an hprof agentlib string containing
 * a file=%s placeholder.
 */
@Test
public void testProfileParamsDefaults() {
  JobConf jobConf = new JobConf();
  String params = jobConf.getProfileParams();
  Assert.assertNotNull(params);
  Assert.assertTrue(params.contains("file=%s"));
  Assert.assertTrue(params.startsWith("-agentlib:hprof"));
}

Class: org.apache.hadoop.contrib.bkjournal.TestBookKeeperAsHASharedDir

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 */
@Test
public void testFailoverWithFailingBKCluster() throws Exception {
  int ensembleSize = numBookies + 1;
  BookieServer newBookie = bkutil.newBookie();
  assertEquals("New bookie didn't start",
      ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
  BookieServer replacementBookie = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
    // Quorum == ensemble, so losing any one bookie blocks journal writes.
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
        ensembleSize);
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
        ensembleSize);
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0).manageNameDfsSharedDirs(false)
        .checkExitOnShutdown(false).build();
    NameNode nn1 = cluster.getNameNode(0); // NOTE(review): unused below
    NameNode nn2 = cluster.getNameNode(1); // NOTE(review): unused below
    cluster.waitActive();
    cluster.transitionToActive(0);
    Path p1 = new Path("/testBKJMFailingBKCluster1");
    Path p2 = new Path("/testBKJMFailingBKCluster2");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    fs.mkdirs(p1);
    // Take the extra bookie down; the ensemble can no longer be formed.
    newBookie.shutdown();
    assertEquals("New bookie didn't stop",
        numBookies, bkutil.checkBookiesUp(numBookies, 10));
    try {
      fs.mkdirs(p2);
      fail("mkdirs should result in the NN exiting");
    } catch (RemoteException re) {
      assertTrue(re.getClassName().contains("ExitException"));
    }
    cluster.shutdownNameNode(0);
    try {
      cluster.transitionToActive(1);
      fail("Shouldn't have been able to transition with bookies down");
    } catch (ExitException ee) {
      assertTrue("Should shutdown due to required journal failure",
          ee.getMessage().contains(
              "starting log segment 3 failed for required journal"));
    }
    // Bring a replacement bookie up; the standby can now become active.
    replacementBookie = bkutil.newBookie();
    assertEquals("Replacement bookie didn't start",
        ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
    cluster.transitionToActive(1);
    // Only the write made before the BK failure is visible.
    assertTrue(fs.exists(p1));
    assertFalse(fs.exists(p2));
  } finally {
    newBookie.shutdown();
    if (replacementBookie != null) {
      replacementBookie.shutdown();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that two namenodes can't continue as primary: after a failover,
 * an edit-log update attempted by the older active must make it exit.
 */
@Test
public void testMultiplePrimariesStarted() throws Exception {
  Path p1 = new Path("/testBKJMMultiplePrimary");
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0).manageNameDfsSharedDirs(false)
        .checkExitOnShutdown(false).build();
    // Only nn0's handle is needed (to roll its edit log); the unused
    // local for nn1 has been removed.
    NameNode nn1 = cluster.getNameNode(0);
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    fs.mkdirs(p1);
    nn1.getRpcServer().rollEditLog();
    cluster.transitionToActive(1);
    // Talk to the old active (nn0) directly; its next log update must
    // cause it to exit rather than continue as a second primary.
    fs = cluster.getFileSystem(0);
    try {
      fs.delete(p1, true);
      fail("Log update on older active should cause it to exit");
    } catch (RemoteException re) {
      assertTrue(re.getClassName().contains("ExitException"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.contrib.bkjournal.TestBookKeeperEditLogStreams

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that bkjm will refuse open a stream on an empty
 * ledger.
 */
@Test
public void testEmptyInputStream() throws Exception {
  ZooKeeper zk = BKJMUtil.connectZooKeeper();
  BookKeeper bkc = new BookKeeper(new ClientConfiguration(), zk);
  try {
    // An empty (immediately closed) ledger has no entries to read.
    LedgerHandle lh = bkc.createLedger(BookKeeper.DigestType.CRC32,
        "foobar".getBytes());
    lh.close();
    EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
        HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
    try {
      new BookKeeperEditLogInputStream(lh, metadata, -1);
      fail("Shouldn't get this far, should have thrown");
    } catch (IOException ioe) {
      assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
    }
    // NOTE(review): this metadata is built with the same arguments as
    // above — presumably only the first-entry argument (0 vs -1) was
    // meant to vary between the two cases; confirm.
    metadata = new EditLogLedgerMetadata("/foobar",
        HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
    try {
      new BookKeeperEditLogInputStream(lh, metadata, 0);
      fail("Shouldn't get this far, should have thrown");
    } catch (IOException ioe) {
      assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
    }
  } finally {
    bkc.close();
    zk.close();
  }
}

Class: org.apache.hadoop.contrib.bkjournal.TestBookKeeperJournalManager

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that if enough bookies fail to prevent an ensemble,
 * writes the bookkeeper will fail. Test that when once again
 * an ensemble is available, it can continue to write.
 */
@Test
public void testAllBookieFailure() throws Exception {
  BookieServer bookieToFail = bkutil.newBookie();
  BookieServer replacementBookie = null;
  try {
    int ensembleSize = numBookies + 1;
    assertEquals("New bookie didn't start",
        ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
    // Quorum == ensemble, so losing any one bookie blocks all writes.
    Configuration conf = new Configuration();
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
        ensembleSize);
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
        ensembleSize);
    long txid = 1;
    NamespaceInfo nsi = newNSInfo();
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"), nsi);
    bkjm.format(nsi);
    EditLogOutputStream out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    // Writes succeed while the full ensemble is up.
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
    bookieToFail.shutdown();
    assertEquals("New bookie didn't die",
        numBookies, bkutil.checkBookiesUp(numBookies, 10));
    try {
      // With the ensemble broken, the flush must fail.
      for (long i = 1; i <= 3; i++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
      out.setReadyToFlush();
      out.flush();
      fail("should not get to this stage");
    } catch (IOException ioe) {
      LOG.debug("Error writing to bookkeeper", ioe);
      assertTrue("Invalid exception message",
          ioe.getMessage().contains("Failed to write to bookkeeper"));
    }
    // A replacement bookie restores the ensemble; recovery and further
    // writes must succeed again.
    replacementBookie = bkutil.newBookie();
    assertEquals("New bookie didn't start",
        numBookies + 1, bkutil.checkBookiesUp(numBookies + 1, 10));
    bkjm.recoverUnfinalizedSegments();
    out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
  } catch (Exception e) {
    LOG.error("Exception in test", e);
    throw e;
  } finally {
    if (replacementBookie != null) {
      replacementBookie.shutdown();
    }
    bookieToFail.shutdown();
    if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
      LOG.warn("Not all bookies from this test shut down, expect errors");
    }
  }
}

IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Writes three finalized segments followed by one half-written segment
 * that is flushed but aborted (left in-progress), then verifies that
 * getNumberOfTransactions(1, true) counts the in-progress transactions.
 */
@Test
public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
  bkjm.format(nsi);
  long txid = 1;
  // Three complete, finalized segments of DEFAULT_SEGMENT_SIZE ops each.
  for (long i = 0; i < 3; i++) {
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid - 1));
    assertNotNull(
        zkc.exists(bkjm.finalizedLedgerZNode(start, (txid - 1)), false));
  }
  // One half-written segment that is flushed but aborted, i.e. left
  // in-progress rather than finalized.
  long start = txid;
  EditLogOutputStream out = bkjm.startLogSegment(start,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long j = 1; j <= DEFAULT_SEGMENT_SIZE / 2; j++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();
  out.abort();
  out.close();
  // With inProgressOk=true every written transaction is counted.
  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals((txid - 1), numTrans);
}

IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Writes three finalized segments, deletes the middle segment's znode to
 * create a gap, and checks that transaction counting detects corruption
 * at the gap but still works before and after it.
 */
@Test
public void testNumberOfTransactionsWithGaps() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
  bkjm.format(nsi);
  long txid = 1;
  for (long i = 0; i < 3; i++) {
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, txid - 1);
    assertNotNull(
        zkc.exists(bkjm.finalizedLedgerZNode(start, txid - 1), false));
  }
  // Remove the middle segment (txids SEGMENT+1 .. 2*SEGMENT) directly in
  // ZooKeeper to simulate a gap.
  zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE + 1,
      DEFAULT_SEGMENT_SIZE * 2), -1);
  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
  try {
    numTrans = bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE + 1, true);
    fail("Should have thrown corruption exception by this point");
  } catch (JournalManager.CorruptionException ce) {
    // Expected: the segment starting here was deleted.
  }
  numTrans = bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE * 2) + 1,
      true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * If a journal manager has an corrupt inprogress node, ensure that we throw
 * an error, as this should not be possible, and some third party has
 * corrupted the zookeeper state
 */
@Test
public void testCorruptInprogressNode() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-corruptInprogress");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  // Write and finalize one full segment of 100 no-ops.
  // (A stray empty statement after this call has been removed.)
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);
  // Leave a second segment in-progress, then overwrite its znode data
  // with garbage to simulate third-party corruption.
  out = bkjm.startLogSegment(101, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();
  String inprogressZNode = bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode, "WholeLottaJunk".getBytes(), -1);
  bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating"
        + " an empty inprogess znode");
  } catch (IOException e) {
    assertTrue("Exception different than expected",
        e.getMessage().contains("has no field named"));
  } finally {
    bkjm.close();
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * If a journal manager has an empty inprogress node, ensure that we throw an
 * error, as this should not be possible, and some third party has corrupted
 * the zookeeper state
 */
@Test
public void testEmptyInprogressNode() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  // Write and finalize one full segment of 100 no-ops.
  // (A stray empty statement after this call has been removed.)
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);
  // Leave a second segment in-progress, then truncate its znode data to
  // zero bytes to simulate third-party corruption.
  out = bkjm.startLogSegment(101, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();
  String inprogressZNode = bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode, new byte[0], -1);
  bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating"
        + " an empty inprogess znode");
  } catch (IOException e) {
    assertTrue("Exception different than expected",
        e.getMessage().contains("Invalid/Incomplete data in znode"));
  } finally {
    bkjm.close();
  }
}

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Create a bkjm namespace, write a journal from txid 1, close stream.
 * Try to create a new journal from txid 1. Should throw an exception.
 */
@Test
public void testWriteRestartFrom1() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi);
  bkjm.format(nsi);
  long txid = 1;
  long start = txid;
  EditLogOutputStream out = bkjm.startLogSegment(txid,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(start, (txid - 1));
  // Restarting from txid 1 overlaps the finalized segment and must fail.
  txid = 1;
  try {
    out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Shouldn't be able to start another journal from " + txid
        + " when one already exists");
  } catch (Exception ioe) {
    // NOTE(review): catches Exception here but IOException in the next
    // case — presumably both were meant to be IOException; confirm.
    LOG.info("Caught exception as expected", ioe);
  }
  // Restarting from a txid inside the finalized range must also fail.
  txid = DEFAULT_SEGMENT_SIZE;
  try {
    out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Shouldn't be able to start another journal from " + txid
        + " when one already exists");
  } catch (IOException ioe) {
    LOG.info("Caught exception as expected", ioe);
  }
  // The next txid after the finalized segment is legal.
  txid = DEFAULT_SEGMENT_SIZE + 1;
  start = txid;
  out = bkjm.startLogSegment(start,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertNotNull(out);
  for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(start, (txid - 1));
  // Starting well past the end (leaving a gap) is allowed.
  txid = DEFAULT_SEGMENT_SIZE * 4;
  out = bkjm.startLogSegment(txid,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertNotNull(out);
}

Class: org.apache.hadoop.contrib.bkjournal.TestCurrentInprogress

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Tests that update should throw IOE, if version number modifies between
 * read and update.
 */
@Test(expected = IOException.class)
public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
    throws Exception {
  CurrentInprogress inprogress =
      new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  inprogress.init();
  inprogress.update("myInprogressZnode");
  assertEquals("Not returning myInprogressZnode", "myInprogressZnode",
      inprogress.read());
  // The intervening update changes the znode version after the read
  // above, so the final update must fail with an IOException.
  inprogress.update("YourInprogressZnode");
  inprogress.update("myInprogressZnode");
}

Class: org.apache.hadoop.crypto.CryptoStreamsTestBase

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test skip: skipping forward through the stream, skip with a negative
 * length (must throw), and skip at EOF (must return 0).
 */
@Test(timeout = 120000)
public void testSkip() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());

  long skipped = in.skip(dataLen / 3);
  int n2 = readAll(in, result, 0, dataLen);
  Assert.assertEquals(dataLen, n1 + skipped + n2);

  // The tail read must match the tail of the reference data.
  byte[] readData = new byte[n2];
  System.arraycopy(result, 0, readData, 0, n2);
  byte[] expectedData = new byte[n2];
  System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
  // Fixed: JUnit argument order is (expected, actual).
  Assert.assertArrayEquals(expectedData, readData);

  try {
    skipped = in.skip(-3);
    Assert.fail("Skip Negative length should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Negative skip length", e);
  }

  // Skip at EOF is a no-op. Fixed: (expected, actual) order.
  skipped = in.skip(3);
  Assert.assertEquals(0, skipped);

  in.close();
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test seek to different position: valid seeks, a negative seek (must
 * throw and leave the position unchanged), and a seek past EOF (same).
 */
@Test(timeout = 120000)
public void testSeek() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in = getInputStream(defaultBufferSize);

  // A few legal positions, including back to the start.
  seekCheck(in, dataLen / 3);
  seekCheck(in, 0);
  seekCheck(in, dataLen / 2);
  final long pos = ((Seekable) in).getPos();

  // Negative offsets are rejected and must not move the position.
  try {
    seekCheck(in, -3);
    Assert.fail("Seek to negative offset should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot seek to negative " + "offset", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());

  // Seeking past EOF is rejected and must not move the position either.
  try {
    seekCheck(in, dataLen + 3);
    Assert.fail("Seek after EOF should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());

  in.close();
}

Class: org.apache.hadoop.crypto.TestCryptoCodec

AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Exercises the OpenSSL AES-CTR codec — only when the native build
 * supports OpenSSL — with zero and `count` iterations.
 */
@Test(timeout = 1200000)
public void testOpensslAesCtrCryptoCodec() throws Exception {
  Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
  // assertNull reads better than assertEquals(null, ...) and reports the
  // loading-failure reason directly on failure.
  Assert.assertNull(OpensslCipher.getLoadingFailureReason());
  cryptoCodecTest(conf, seed, 0,
      "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
  cryptoCodecTest(conf, seed, count,
      "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
}

Class: org.apache.hadoop.crypto.TestCryptoStreamsForLocalFS

TestCleaner BooleanVerifier HybridVerifier 
/**
 * Restores write permission on the test base directory and removes it,
 * verifying the delete actually succeeded.
 */
@After
public void cleanUp() throws IOException {
  FileUtil.setWritable(base, true);
  FileUtil.fullyDelete(base);
  // assertFalse is clearer than assertTrue(!...).
  assertFalse(base.exists());
}

Class: org.apache.hadoop.crypto.TestOpensslCipher

UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * getInstance must return a cipher for a supported transformation and
 * reject unknown algorithms and unknown paddings.
 */
@Test(timeout = 120000)
public void testGetInstance() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  // assertNotNull reads better than assertTrue(x != null).
  Assert.assertNotNull(cipher);
  try {
    cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding");
    Assert.fail("Should specify correct algorithm.");
  } catch (NoSuchAlgorithmException e) {
    // Expected: unknown algorithm.
  }
  try {
    cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2");
    Assert.fail("Should specify correct padding.");
  } catch (NoSuchPaddingException e) {
    // Expected: unknown padding.
  }
}

UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * doFinal() must reject a non-direct output buffer; skipped when the
 * native OpenSSL binding failed to load.
 */
@Test(timeout = 120000)
public void testDoFinalArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  // assertNotNull gives a better failure message than assertTrue(x != null).
  Assert.assertNotNull(cipher);
  cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);

  // Heap-allocated (non-direct) buffer must be rejected.
  ByteBuffer output = ByteBuffer.allocate(1024);
  try {
    cipher.doFinal(output);
    Assert.fail("Output buffer should be direct buffer.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Direct buffer is required", e);
  }
}

UtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * update() must reject non-direct buffers and an output buffer that is
 * too small for the produced data; skipped when the native OpenSSL
 * binding failed to load.
 */
@Test(timeout = 120000)
public void testUpdateArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  // assertNotNull gives a better failure message than assertTrue(x != null).
  Assert.assertNotNull(cipher);
  cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);

  // Heap-allocated (non-direct) buffers must be rejected.
  ByteBuffer input = ByteBuffer.allocate(1024);
  ByteBuffer output = ByteBuffer.allocate(1024);
  try {
    cipher.update(input, output);
    Assert.fail("Input and output buffer should be direct buffer.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Direct buffers are required", e);
  }

  // Direct buffers, but the output (1000) is smaller than the input (1024).
  input = ByteBuffer.allocateDirect(1024);
  output = ByteBuffer.allocateDirect(1000);
  try {
    cipher.update(input, output);
    Assert.fail("Output buffer length should be sufficient "
        + "to store output data");
  } catch (ShortBufferException e) {
    GenericTestUtils.assertExceptionContains(
        "Output buffer is not sufficient", e);
  }
}

Class: org.apache.hadoop.crypto.key.TestKeyProvider

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * KeyProvider.getBaseName() must strip the trailing "@version" suffix
 * and reject names that contain no version separator.
 */
@Test
public void testParseVersionName() throws Exception {
  assertEquals("/a/b", KeyProvider.getBaseName("/a/b@3"));
  assertEquals("/aaa", KeyProvider.getBaseName("/aaa@112"));
  try {
    KeyProvider.getBaseName("no-slashes");
    // fail() replaces the original assertTrue("should have thrown", false).
    fail("should have thrown");
  } catch (IOException e) {
    // Expected: a name without '@' is not a valid version name.
    // (Original assertTrue(true) was a no-op and is removed.)
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips KeyProvider.Metadata through serialize() and the byte[]
 * constructor, checking every field survives — first with null
 * description/attributes, then with both populated — and verifies that
 * addVersion() bumps only the instance it is called on.
 * NOTE(review): the "y/m/d" pattern uses 'm' (minutes), not 'M' (months);
 * the round-trip is still self-consistent, but confirm the pattern is
 * intentional. Raw Map/HashMap generics appear erased by extraction.
 */
@Test public void testMetadata() throws Exception { DateFormat format=new SimpleDateFormat("y/m/d"); Date date=format.parse("2013/12/25"); KeyProvider.Metadata meta=new KeyProvider.Metadata("myCipher",100,null,null,date,123); assertEquals("myCipher",meta.getCipher()); assertEquals(100,meta.getBitLength()); assertNull(meta.getDescription()); assertEquals(date,meta.getCreated()); assertEquals(123,meta.getVersions()); KeyProvider.Metadata second=new KeyProvider.Metadata(meta.serialize()); assertEquals(meta.getCipher(),second.getCipher()); assertEquals(meta.getBitLength(),second.getBitLength()); assertNull(second.getDescription()); assertTrue(second.getAttributes().isEmpty()); assertEquals(meta.getCreated(),second.getCreated()); assertEquals(meta.getVersions(),second.getVersions()); int newVersion=second.addVersion(); assertEquals(123,newVersion); assertEquals(124,second.getVersions()); assertEquals(123,meta.getVersions()); format=new SimpleDateFormat("y/m/d"); date=format.parse("2013/12/25"); Map attributes=new HashMap(); attributes.put("a","A"); meta=new KeyProvider.Metadata("myCipher",100,"description",attributes,date,123); assertEquals("myCipher",meta.getCipher()); assertEquals(100,meta.getBitLength()); assertEquals("description",meta.getDescription()); assertEquals(attributes,meta.getAttributes()); assertEquals(date,meta.getCreated()); assertEquals(123,meta.getVersions()); second=new KeyProvider.Metadata(meta.serialize()); assertEquals(meta.getCipher(),second.getCipher()); assertEquals(meta.getBitLength(),second.getBitLength()); assertEquals(meta.getDescription(),second.getDescription()); assertEquals(meta.getAttributes(),second.getAttributes()); assertEquals(meta.getCreated(),second.getCreated()); assertEquals(meta.getVersions(),second.getVersions()); newVersion=second.addVersion(); assertEquals(123,newVersion); assertEquals(124,second.getVersions()); assertEquals(123,meta.getVersions()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that both createKey() and rollNewVersion() drive key-material
 * generation with the configured cipher and bit length, as observed
 * through the MyKeyProvider test double's recorded fields.
 */
@Test
public void testMaterialGeneration() throws Exception {
  MyKeyProvider provider = new MyKeyProvider();
  KeyProvider.Options opts = new KeyProvider.Options(new Configuration());
  opts.setCipher(CIPHER);
  opts.setBitLength(128);
  provider.createKey("hello", opts);
  Assert.assertEquals(128, provider.size);
  Assert.assertEquals(CIPHER, provider.algorithm);
  Assert.assertNotNull(provider.material);

  // A fresh provider must record the same parameters on rollover.
  provider = new MyKeyProvider();
  provider.rollNewVersion("hello");
  Assert.assertEquals(128, provider.size);
  Assert.assertEquals(CIPHER, provider.algorithm);
  Assert.assertNotNull(provider.material);
}

Class: org.apache.hadoop.crypto.key.TestKeyProviderCryptoExtension

BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Generates an encrypted key (EEK): checks its version name, key name and
 * material length, decrypts it twice and expects identical material, then
 * generates a second EEK and expects both its material and its IV to
 * differ from the first. The Arrays.equals/fail pairs assert that the
 * encrypted material never equals the raw encryption-key material.
 */
@Test public void testGenerateEncryptedKey() throws Exception { KeyProviderCryptoExtension.EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(encryptionKey.getName()); assertEquals("Version name of EEK should be EEK",KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName()); assertEquals("Name of EEK should be encryption key name",ENCRYPTION_KEY_NAME,ek1.getEncryptionKeyName()); assertNotNull("Expected encrypted key material",ek1.getEncryptedKeyVersion().getMaterial()); assertEquals("Length of encryption key material and EEK material should " + "be the same",encryptionKey.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length); KeyVersion k1=kpExt.decryptEncryptedKey(ek1); assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName()); assertEquals(encryptionKey.getMaterial().length,k1.getMaterial().length); if (Arrays.equals(k1.getMaterial(),encryptionKey.getMaterial())) { fail("Encrypted key material should not equal encryption key material"); } if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),encryptionKey.getMaterial())) { fail("Encrypted key material should not equal decrypted key material"); } KeyVersion k1a=kpExt.decryptEncryptedKey(ek1); assertArrayEquals(k1.getMaterial(),k1a.getMaterial()); KeyProviderCryptoExtension.EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(encryptionKey.getName()); KeyVersion k2=kpExt.decryptEncryptedKey(ek2); if (Arrays.equals(k1.getMaterial(),k2.getMaterial())) { fail("Generated EEKs should have different material!"); } if (Arrays.equals(ek1.getEncryptedKeyIv(),ek2.getEncryptedKeyIv())) { fail("Generated EEKs should have different IVs!"); } }

Class: org.apache.hadoop.crypto.key.TestKeyProviderDelegationTokenExtension

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creates the delegation-token extension twice: over a real UserProvider
 * (addDelegationTokens must return null — no tokens) and over a Mockito
 * mock stubbed to return a single token, whose kind must surface through
 * the extension's addDelegationTokens().
 */
@Test public void testCreateExtension() throws Exception { Configuration conf=new Configuration(); Credentials credentials=new Credentials(); KeyProvider kp=new UserProvider.Factory().createProvider(new URI("user:///"),conf); KeyProviderDelegationTokenExtension kpDTE1=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp); Assert.assertNotNull(kpDTE1); Assert.assertNull(kpDTE1.addDelegationTokens("user",credentials)); MockKeyProvider mock=mock(MockKeyProvider.class); when(mock.addDelegationTokens("renewer",credentials)).thenReturn(new Token[]{new Token(null,null,new Text("kind"),new Text("service"))}); KeyProviderDelegationTokenExtension kpDTE2=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(mock); Token[] tokens=kpDTE2.addDelegationTokens("renewer",credentials); Assert.assertNotNull(tokens); Assert.assertEquals("kind",tokens[0].getKind().toString()); }

Class: org.apache.hadoop.crypto.key.TestKeyProviderFactory

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * End-to-end JavaKeyStoreProvider test: the keystore is created with
 * rwx------ permissions, then the _OLD/_NEW crash-recovery scenarios are
 * exercised by renaming/deleting the keystore file — the provider must
 * load from the correct surviving file, refuse to load when both _NEW and
 * the current file exist, and clean up stale copies — and finally
 * permission retention across a reload is checked.
 * Left byte-identical: every assertion depends on the exact ordering of
 * the rename/delete steps.
 * NOTE(review): the message in assertTrue(oldFile + "should be deleted", ...)
 * is missing a space after the file name — cosmetic only.
 */
@Test public void testJksProvider() throws Exception { Configuration conf=new Configuration(); final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks"; File file=new File(tmpDir,"test.jks"); file.delete(); conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl); checkSpecificProvider(conf,ourUrl); Path path=ProviderUtils.unnestUri(new URI(ourUrl)); FileSystem fs=path.getFileSystem(conf); FileStatus s=fs.getFileStatus(path); assertTrue(s.getPermission().toString().equals("rwx------")); assertTrue(file + " should exist",file.isFile()); File oldFile=new File(file.getPath() + "_OLD"); file.renameTo(oldFile); file.delete(); file.createNewFile(); assertTrue(oldFile.exists()); KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0); assertTrue(file.exists()); assertTrue(oldFile + "should be deleted",!oldFile.exists()); verifyAfterReload(file,provider); assertTrue(!oldFile.exists()); File newFile=new File(file.getPath() + "_NEW"); newFile.createNewFile(); try { provider=KeyProviderFactory.getProviders(conf).get(0); Assert.fail("_NEW and current file should not exist together !!"); } catch ( Exception e) { } finally { if (newFile.exists()) { newFile.delete(); } } file.renameTo(newFile); file.delete(); try { provider=KeyProviderFactory.getProviders(conf).get(0); Assert.assertFalse(newFile.exists()); Assert.assertFalse(oldFile.exists()); } catch ( Exception e) { Assert.fail("JKS should load from _NEW file !!"); } verifyAfterReload(file,provider); newFile.createNewFile(); file.renameTo(oldFile); file.delete(); try { provider=KeyProviderFactory.getProviders(conf).get(0); Assert.assertFalse(newFile.exists()); Assert.assertFalse(oldFile.exists()); } catch ( Exception e) { Assert.fail("JKS should load from _OLD file !!"); } finally { if (newFile.exists()) { newFile.delete(); } } verifyAfterReload(file,provider); fs.setPermission(path,new FsPermission("777")); checkPermissionRetention(conf,ourUrl,path); }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A malformed provider URI ("unkn@own:/x/y") must make getProviders()
 * fail with a "Bad configuration" IOException.
 */
@Test
public void testUriErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unkn@own:/x/y");
  try {
    // Return value intentionally ignored; the call itself must throw.
    KeyProviderFactory.getProviders(conf);
    // fail() replaces the original assertTrue("should throw!", false).
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("Bad configuration of "
        + KeyProviderFactory.KEY_PROVIDER_PATH
        + " at unkn@own:/x/y", e.getMessage());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A well-formed URI whose scheme has no registered factory
 * ("unknown:///") must make getProviders() fail with a
 * "No KeyProviderFactory" IOException.
 */
@Test
public void testFactoryErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unknown:///");
  try {
    // Return value intentionally ignored; the call itself must throw.
    KeyProviderFactory.getProviders(conf);
    // fail() replaces the original assertTrue("should throw!", false).
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("No KeyProviderFactory for unknown:/// in "
        + KeyProviderFactory.KEY_PROVIDER_PATH, e.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Keystore password supplied via the password-file config property:
 * creation and reload succeed with the correct file, and loading must
 * fail (IOException) for a nonexistent file, a file with the wrong
 * password, and no file at all. The empty catch blocks are the
 * expected-failure paths for those three cases.
 */
@Test public void testJksProviderPasswordViaConfig() throws Exception { Configuration conf=new Configuration(); final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks"; File file=new File(tmpDir,"test.jks"); file.delete(); try { conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl); conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"javakeystoreprovider.password"); KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0); provider.createKey("key3",new byte[16],KeyProvider.options(conf)); provider.flush(); } catch ( Exception ex) { Assert.fail("could not create keystore with password file"); } KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0); Assert.assertNotNull(provider.getCurrentKey("key3")); try { conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"bar"); KeyProviderFactory.getProviders(conf).get(0); Assert.fail("using non existing password file, it should fail"); } catch ( IOException ex) { } try { conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"core-site.xml"); KeyProviderFactory.getProviders(conf).get(0); Assert.fail("using different password file, it should fail"); } catch ( IOException ex) { } try { conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY); KeyProviderFactory.getProviders(conf).get(0); Assert.fail("No password file property, env not set, it should fail"); } catch ( IOException ex) { } }

Class: org.apache.hadoop.crypto.key.TestKeyShell

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A provider URI with an unknown scheme must make the shell exit with 1
 * and report that no valid KeyProviders are configured.
 */
@Test
public void testInvalidProvider() throws Exception {
  final String[] shellArgs = {"create", "key1", "-cipher", "AES",
      "-provider", "sdff://file/tmp/keystore.jceks"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains(
      "There are no valid KeyProviders configured."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An unsupported key size (56 bits) must make the shell exit with 1
 * and report that the key was not created.
 */
@Test
public void testInvalidKeySize() throws Exception {
  final String[] shellArgs = {"create", "key1", "-size", "56",
      "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the only configured provider is the transient user:/// provider,
 * key creation must fail with exit code 1 and a "no valid KeyProviders"
 * message.
 */
@Test
public void testTransientProviderOnlyConfig() throws Exception {
  final String[] shellArgs = {"create", "key1"};
  Configuration config = new Configuration();
  config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
  KeyShell shell = new KeyShell();
  shell.setConf(config);
  int exitCode = shell.run(shellArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains(
      "There are no valid KeyProviders configured."));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Full key lifecycle through the shell: create, list (short and verbose
 * — verbose output must include description and creation date), roll,
 * then delete and verify the key disappears from the listing.
 */
@Test public void testKeySuccessfulKeyLifecycle() throws Exception { int rc=0; String keyName="key1"; KeyShell ks=new KeyShell(); ks.setConf(new Configuration()); outContent.reset(); final String[] args1={"create",keyName,"-provider",jceksProvider}; rc=ks.run(args1); assertEquals(0,rc); assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created")); String listOut=listKeys(ks,false); assertTrue(listOut.contains(keyName)); listOut=listKeys(ks,true); assertTrue(listOut.contains(keyName)); assertTrue(listOut.contains("description")); assertTrue(listOut.contains("created")); outContent.reset(); final String[] args2={"roll",keyName,"-provider",jceksProvider}; rc=ks.run(args2); assertEquals(0,rc); assertTrue(outContent.toString().contains("key1 has been successfully " + "rolled.")); deleteKey(ks,keyName); listOut=listKeys(ks,false); assertFalse(listOut,listOut.contains(keyName)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises -attr parsing: a valid foo=bar attribute; malformed forms
 * "=bar", "foo" and "=" (each must fail with rc 1); a value containing
 * '=' ("a=b=c", valid); whitespace trimming around names and values;
 * multiple attributes on one key; and rejection of a duplicate attribute
 * name (keyattr4, rc 1 — never created, hence not deleted at the end).
 * Note args2[5] is mutated in place to re-run the same command with
 * different attribute strings.
 */
@Test public void testAttributes() throws Exception { int rc; KeyShell ks=new KeyShell(); ks.setConf(new Configuration()); final String[] args1={"create","keyattr1","-provider",jceksProvider,"-attr","foo=bar"}; rc=ks.run(args1); assertEquals(0,rc); assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created")); String listOut=listKeys(ks,true); assertTrue(listOut.contains("keyattr1")); assertTrue(listOut.contains("attributes: [foo=bar]")); outContent.reset(); final String[] args2={"create","keyattr2","-provider",jceksProvider,"-attr","=bar"}; rc=ks.run(args2); assertEquals(1,rc); outContent.reset(); args2[5]="foo"; rc=ks.run(args2); assertEquals(1,rc); outContent.reset(); args2[5]="="; rc=ks.run(args2); assertEquals(1,rc); outContent.reset(); args2[5]="a=b=c"; rc=ks.run(args2); assertEquals(0,rc); listOut=listKeys(ks,true); assertTrue(listOut.contains("keyattr2")); assertTrue(listOut.contains("attributes: [a=b=c]")); outContent.reset(); final String[] args3={"create","keyattr3","-provider",jceksProvider,"-attr","foo = bar","-attr"," glarch =baz ","-attr","abc=def"}; rc=ks.run(args3); assertEquals(0,rc); listOut=listKeys(ks,true); assertTrue(listOut.contains("keyattr3")); assertTrue(listOut.contains("[foo=bar]")); assertTrue(listOut.contains("[glarch=baz]")); assertTrue(listOut.contains("[abc=def]")); outContent.reset(); final String[] args4={"create","keyattr4","-provider",jceksProvider,"-attr","foo=bar","-attr","foo=glarch"}; rc=ks.run(args4); assertEquals(1,rc); deleteKey(ks,"keyattr1"); deleteKey(ks,"keyattr2"); deleteKey(ks,"keyattr3"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An unknown cipher name ("LJM") must make the shell exit with 1 and
 * report that the key was not created.
 */
@Test
public void testInvalidCipher() throws Exception {
  final String[] shellArgs = {"create", "key1", "-cipher", "LJM",
      "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a key with -description must succeed and the description
 * must appear in the verbose key listing.
 */
@Test
public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  final String[] shellArgs = {"create", "key1", "-provider", jceksProvider,
      "-description", "someDescription"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(
      "key1 has been successfully created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a key in the transient user:/// provider must succeed but
 * print a warning that the provider is transient.
 */
@Test
public void testTransientProviderWarning() throws Exception {
  final String[] shellArgs = {"create", "key1", "-cipher", "AES",
      "-provider", "user:///"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(
      "WARNING: you are modifying a transient provider."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a key with a fully-qualified cipher spec
 * (AES/CBC/pkcs5Padding) must succeed; the key is cleaned up afterwards.
 */
@Test
public void testFullCipher() throws Exception {
  final String keyName = "key1";
  final String[] shellArgs = {"create", keyName, "-cipher",
      "AES/CBC/pkcs5Padding", "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(shellArgs);
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(
      keyName + " has been successfully created"));
  deleteKey(shell, keyName);
}

Class: org.apache.hadoop.crypto.key.kms.server.TestKMS

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Starts a KMS with each per-operation ACL mapped to a user of the same
 * name and verifies that "client" is denied every operation while each
 * matching ACL user (CREATE, DELETE, SET_KEY_MATERIAL, ROLLOVER, GET,
 * GENERATE_EEK, DECRYPT_EEK, GET_KEYS, GET_METADATA) succeeds at exactly
 * its own operation. Finally the ACL file is rewritten to revoke CREATE
 * and the reloader is driven manually (stopReloader/sleep/run) to confirm
 * the new ACLs take effect.
 * Left byte-identical: the doAs choreography and the reloader timing are
 * order-sensitive. NOTE(review): the Thread.sleep(10)/sleep(1000) delays
 * presumably let the config file's mtime change before the manual reload
 * — confirm; sleeps like these can be flaky on slow hosts.
 */
@Test public void testACLs() throws Exception { Configuration conf=new Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); final File testDir=getTestDir(); conf=createBaseKMSConf(testDir); conf.set("hadoop.kms.authentication.type","kerberos"); conf.set("hadoop.kms.authentication.kerberos.keytab",keytab.getAbsolutePath()); conf.set("hadoop.kms.authentication.kerberos.principal","HTTP/localhost"); conf.set("hadoop.kms.authentication.kerberos.name.rules","DEFAULT"); for ( KMSACLs.Type type : KMSACLs.Type.values()) { conf.set(type.getConfigKey(),type.toString()); } conf.set(KMSACLs.Type.CREATE.getConfigKey(),KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL"); conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(),KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL"); writeConf(testDir,conf); runServer(null,null,testDir,new KMSCallable(){ @Override public Void call() throws Exception { final Configuration conf=new Configuration(); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,128); final URI uri=createKMSUri(getKMSUrl()); doAs("client",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { kp.createKey("k",new KeyProvider.Options(conf)); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.createKey("k",new byte[16],new KeyProvider.Options(conf)); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.rollNewVersion("k"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.rollNewVersion("k",new byte[16]); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.getKeys(); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { 
Assert.fail(ex.getMessage()); } try { kp.getKeysMetadata("k"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.getKeyVersion("k@0"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.getCurrentKey("k"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.getMetadata("k"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } try { kp.getKeyVersions("k"); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("CREATE",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProvider.KeyVersion kv=kp.createKey("k0",new KeyProvider.Options(conf)); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("DELETE",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { kp.deleteKey("k0"); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("SET_KEY_MATERIAL",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProvider.KeyVersion kv=kp.createKey("k1",new byte[16],new KeyProvider.Options(conf)); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("ROLLOVER",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProvider.KeyVersion kv=kp.rollNewVersion("k1"); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { 
Assert.fail(ex.getMessage()); } return null; } } ); doAs("SET_KEY_MATERIAL",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProvider.KeyVersion kv=kp.rollNewVersion("k1",new byte[16]); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); final KeyVersion currKv=doAs("GET",new PrivilegedExceptionAction(){ @Override public KeyVersion run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { kp.getKeyVersion("k1@0"); KeyVersion kv=kp.getCurrentKey("k1"); return kv; } catch ( Exception ex) { Assert.fail(ex.toString()); } return null; } } ); final EncryptedKeyVersion encKv=doAs("GENERATE_EEK",new PrivilegedExceptionAction(){ @Override public EncryptedKeyVersion run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProviderCryptoExtension kpCE=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp); EncryptedKeyVersion ek1=kpCE.generateEncryptedKey(currKv.getName()); return ek1; } catch ( Exception ex) { Assert.fail(ex.toString()); } return null; } } ); doAs("DECRYPT_EEK",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { KeyProviderCryptoExtension kpCE=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp); kpCE.decryptEncryptedKey(encKv); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("GET_KEYS",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { kp.getKeys(); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("GET_METADATA",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { KeyProvider kp=new KMSClientProvider(uri,conf); try { kp.getMetadata("k1"); kp.getKeysMetadata("k1"); } catch ( Exception ex) 
{ Assert.fail(ex.getMessage()); } return null; } } ); KMSWebApp.getACLs().stopReloader(); Thread.sleep(10); conf.set(KMSACLs.Type.CREATE.getConfigKey(),"foo"); writeConf(testDir,conf); Thread.sleep(1000); KMSWebApp.getACLs().run(); doAs("CREATE",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { KeyProvider kp=new KMSClientProvider(uri,conf); KeyProvider.KeyVersion kv=kp.createKey("k2",new KeyProvider.Options(conf)); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); return null; } } ); }

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * With all ACLs blanked and CREATE granted to "client", both "client"
 * and "client/host" must be able to create keys — presumably because the
 * DEFAULT Kerberos name rules reduce the service principal to its short
 * name (TODO confirm against the auth-to-local configuration).
 */
@Test public void testServicePrincipalACLs() throws Exception { Configuration conf=new Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); File testDir=getTestDir(); conf=createBaseKMSConf(testDir); conf.set("hadoop.kms.authentication.type","kerberos"); conf.set("hadoop.kms.authentication.kerberos.keytab",keytab.getAbsolutePath()); conf.set("hadoop.kms.authentication.kerberos.principal","HTTP/localhost"); conf.set("hadoop.kms.authentication.kerberos.name.rules","DEFAULT"); for ( KMSACLs.Type type : KMSACLs.Type.values()) { conf.set(type.getConfigKey()," "); } conf.set(KMSACLs.Type.CREATE.getConfigKey(),"client"); writeConf(testDir,conf); runServer(null,null,testDir,new KMSCallable(){ @Override public Void call() throws Exception { final Configuration conf=new Configuration(); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,128); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,64); final URI uri=createKMSUri(getKMSUrl()); doAs("client",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { KeyProvider kp=new KMSClientProvider(uri,conf); KeyProvider.KeyVersion kv=kp.createKey("ck0",new KeyProvider.Options(conf)); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); doAs("client/host",new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { KeyProvider kp=new KMSClientProvider(uri,conf); KeyProvider.KeyVersion kv=kp.createKey("ck1",new KeyProvider.Options(conf)); Assert.assertNull(kv.getMaterial()); } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); return null; } } ); }

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Proxy-user enforcement: with hadoop.kms.proxyuser.client.users=foo,
 * "client" may impersonate "foo" (key creation succeeds) but not "foo1",
 * whose create attempt must raise AuthorizationException; any other
 * exception type fails the test.
 */
@Test public void testProxyUser() throws Exception { Configuration conf=new Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); final File testDir=getTestDir(); conf=createBaseKMSConf(testDir); conf.set("hadoop.kms.authentication.type","kerberos"); conf.set("hadoop.kms.authentication.kerberos.keytab",keytab.getAbsolutePath()); conf.set("hadoop.kms.authentication.kerberos.principal","HTTP/localhost"); conf.set("hadoop.kms.authentication.kerberos.name.rules","DEFAULT"); conf.set("hadoop.kms.proxyuser.client.users","foo"); conf.set("hadoop.kms.proxyuser.client.hosts","*"); writeConf(testDir,conf); runServer(null,null,testDir,new KMSCallable(){ @Override public Void call() throws Exception { final Configuration conf=new Configuration(); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,64); final URI uri=createKMSUri(getKMSUrl()); UserGroupInformation clientUgi=UserGroupInformation.loginUserFromKeytabAndReturnUGI("client",keytab.getAbsolutePath()); clientUgi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { final KeyProvider kp=new KMSClientProvider(uri,conf); kp.createKey("kAA",new KeyProvider.Options(conf)); UserGroupInformation fooUgi=UserGroupInformation.createRemoteUser("foo"); fooUgi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { Assert.assertNotNull(kp.createKey("kBB",new KeyProvider.Options(conf))); return null; } } ); UserGroupInformation foo1Ugi=UserGroupInformation.createRemoteUser("foo1"); foo1Ugi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { kp.createKey("kCC",new KeyProvider.Options(conf)); Assert.fail(); } catch ( AuthorizationException ex) { } catch ( Exception ex) { Assert.fail(ex.getMessage()); } return null; } } ); return null; } } ); return null; } } ); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end exercise of the KMS {@code KeyProvider} client API against an
 * embedded server.
 *
 * <p>Phases visible in this method, in order:
 * <ol>
 *   <li>Empty provider: no keys, no metadata.</li>
 *   <li>Create "k1" (AES/CTR/NoPadding, 128 bit, description "l1"); verify
 *       version name, material, current key, metadata (cipher, algorithm,
 *       bit length, version count, creation time after test start), and the
 *       single-element version list.</li>
 *   <li>Roll "k1" to a new version; verify the new material differs
 *       byte-wise from v1, current key equals v2, version list has both
 *       versions in order, metadata now reports 2 versions; getKeys and
 *       getKeysMetadata("k1") agree.</li>
 *   <li>Crypto extension: generate an encrypted key (version name EEK,
 *       material of key length), decrypt it twice (EK version name,
 *       identical material both times), and check two independently
 *       generated encrypted keys decrypt to different material.</li>
 *   <li>Delete "k1": all lookups return null / empty.</li>
 *   <li>Create "k2".."k5" with different description/attribute combinations
 *       and verify metadata round-trips each combination.</li>
 *   <li>Delegation tokens: one "kms-dt" token is added for the KMS
 *       address.</li>
 * </ol>
 * NOTE(review): raw {@code List}/{@code Map} types are used throughout;
 * left byte-identical because the element types come from KMS APIs.
 */
@Test public void testKMSProvider() throws Exception { Configuration conf=new Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); File confDir=getTestDir(); conf=createBaseKMSConf(confDir); writeConf(confDir,conf); runServer(null,null,confDir,new KMSCallable(){ @Override public Void call() throws Exception { Date started=new Date(); Configuration conf=new Configuration(); URI uri=createKMSUri(getKMSUrl()); KeyProvider kp=new KMSClientProvider(uri,conf); Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertEquals(0,kp.getKeysMetadata().length); KeyProvider.Options options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setDescription("l1"); KeyProvider.KeyVersion kv0=kp.createKey("k1",options); Assert.assertNotNull(kv0); Assert.assertNotNull(kv0.getVersionName()); Assert.assertNotNull(kv0.getMaterial()); KeyProvider.KeyVersion kv1=kp.getKeyVersion(kv0.getVersionName()); Assert.assertEquals(kv0.getVersionName(),kv1.getVersionName()); Assert.assertNotNull(kv1.getMaterial()); KeyProvider.KeyVersion cv1=kp.getCurrentKey("k1"); Assert.assertEquals(kv0.getVersionName(),cv1.getVersionName()); Assert.assertNotNull(cv1.getMaterial()); KeyProvider.Metadata m1=kp.getMetadata("k1"); Assert.assertEquals("AES/CTR/NoPadding",m1.getCipher()); Assert.assertEquals("AES",m1.getAlgorithm()); Assert.assertEquals(128,m1.getBitLength()); Assert.assertEquals(1,m1.getVersions()); Assert.assertNotNull(m1.getCreated()); Assert.assertTrue(started.before(m1.getCreated())); List lkv1=kp.getKeyVersions("k1"); Assert.assertEquals(1,lkv1.size()); Assert.assertEquals(kv0.getVersionName(),lkv1.get(0).getVersionName()); Assert.assertNotNull(kv1.getMaterial()); KeyProvider.KeyVersion kv2=kp.rollNewVersion("k1"); Assert.assertNotSame(kv0.getVersionName(),kv2.getVersionName()); Assert.assertNotNull(kv2.getMaterial()); kv2=kp.getKeyVersion(kv2.getVersionName()); boolean eq=true; 
for (int i=0; i < kv1.getMaterial().length; i++) { eq=eq && kv1.getMaterial()[i] == kv2.getMaterial()[i]; } Assert.assertFalse(eq); KeyProvider.KeyVersion cv2=kp.getCurrentKey("k1"); Assert.assertEquals(kv2.getVersionName(),cv2.getVersionName()); Assert.assertNotNull(cv2.getMaterial()); eq=true; for (int i=0; i < kv1.getMaterial().length; i++) { eq=eq && cv2.getMaterial()[i] == kv2.getMaterial()[i]; } Assert.assertTrue(eq); List lkv2=kp.getKeyVersions("k1"); Assert.assertEquals(2,lkv2.size()); Assert.assertEquals(kv1.getVersionName(),lkv2.get(0).getVersionName()); Assert.assertNotNull(lkv2.get(0).getMaterial()); Assert.assertEquals(kv2.getVersionName(),lkv2.get(1).getVersionName()); Assert.assertNotNull(lkv2.get(1).getMaterial()); KeyProvider.Metadata m2=kp.getMetadata("k1"); Assert.assertEquals("AES/CTR/NoPadding",m2.getCipher()); Assert.assertEquals("AES",m2.getAlgorithm()); Assert.assertEquals(128,m2.getBitLength()); Assert.assertEquals(2,m2.getVersions()); Assert.assertNotNull(m2.getCreated()); Assert.assertTrue(started.before(m2.getCreated())); List ks1=kp.getKeys(); Assert.assertEquals(1,ks1.size()); Assert.assertEquals("k1",ks1.get(0)); KeyProvider.Metadata[] kms1=kp.getKeysMetadata("k1"); Assert.assertEquals(1,kms1.length); Assert.assertEquals("AES/CTR/NoPadding",kms1[0].getCipher()); Assert.assertEquals("AES",kms1[0].getAlgorithm()); Assert.assertEquals(128,kms1[0].getBitLength()); Assert.assertEquals(2,kms1[0].getVersions()); Assert.assertNotNull(kms1[0].getCreated()); Assert.assertTrue(started.before(kms1[0].getCreated())); KeyProvider.KeyVersion kv=kp.getCurrentKey("k1"); KeyProviderCryptoExtension kpExt=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp); EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(kv.getName()); Assert.assertEquals(KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName()); Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial()); 
Assert.assertEquals(kv.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length); KeyProvider.KeyVersion k1=kpExt.decryptEncryptedKey(ek1); Assert.assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName()); KeyProvider.KeyVersion k1a=kpExt.decryptEncryptedKey(ek1); Assert.assertArrayEquals(k1.getMaterial(),k1a.getMaterial()); Assert.assertEquals(kv.getMaterial().length,k1.getMaterial().length); EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(kv.getName()); KeyProvider.KeyVersion k2=kpExt.decryptEncryptedKey(ek2); boolean isEq=true; for (int i=0; isEq && i < ek2.getEncryptedKeyVersion().getMaterial().length; i++) { isEq=k2.getMaterial()[i] == k1.getMaterial()[i]; } Assert.assertFalse(isEq); kp.deleteKey("k1"); Assert.assertNull(kp.getKeyVersion("k1")); Assert.assertNull(kp.getKeyVersions("k1")); Assert.assertNull(kp.getMetadata("k1")); Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertEquals(0,kp.getKeysMetadata().length); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); kp.createKey("k2",options); KeyProvider.Metadata meta=kp.getMetadata("k2"); Assert.assertNull(meta.getDescription()); Assert.assertTrue(meta.getAttributes().isEmpty()); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setDescription("d"); kp.createKey("k3",options); meta=kp.getMetadata("k3"); Assert.assertEquals("d",meta.getDescription()); Assert.assertTrue(meta.getAttributes().isEmpty()); Map attributes=new HashMap(); attributes.put("a","A"); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setAttributes(attributes); kp.createKey("k4",options); meta=kp.getMetadata("k4"); Assert.assertNull(meta.getDescription()); Assert.assertEquals(attributes,meta.getAttributes()); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); 
options.setDescription("d"); options.setAttributes(attributes); kp.createKey("k5",options); meta=kp.getMetadata("k5"); Assert.assertEquals("d",meta.getDescription()); Assert.assertEquals(attributes,meta.getAttributes()); KeyProviderDelegationTokenExtension kpdte=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp); Credentials credentials=new Credentials(); kpdte.addDelegationTokens("foo",credentials); Assert.assertEquals(1,credentials.getAllTokens().size()); InetSocketAddress kmsAddr=new InetSocketAddress(getKMSUrl().getHost(),getKMSUrl().getPort()); Assert.assertEquals(new Text("kms-dt"),credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind()); return null; } } ); }

Class: org.apache.hadoop.fs.FSMainOperationsBaseTest

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/a??" combined with TEST_X_FILTER must return exactly
 * the two directories whose names contain an 'x' (AXA and AXX).
 */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdirs is idempotent so the duplicate is harmless.
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop*" must match only the two directory names themselves
 * ("test/hadoop" and "test/hadoop2"), not their children.
 */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] paths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop"), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop2"), paths));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating over an existing file must fail without the overwrite flag and
 * succeed with it; file length is verified after each successful write.
 */
@Test
public void testOverwrite() throws IOException {
  Path filePath = getTestRootPath(fSys, "test/hadoop/file");
  fSys.mkdirs(filePath.getParent());
  createFile(filePath);
  Assert.assertTrue("Exists", exists(fSys, filePath));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(filePath).getLen());
  // A second plain create on the same path must be rejected.
  try {
    createFile(filePath);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }
  // With overwrite=true the create must succeed and preserve the new length.
  FSDataOutputStream stream = fSys.create(filePath, true, 4096);
  stream.write(data, 0, data.length);
  stream.close();
  Assert.assertTrue("Exists", exists(fSys, filePath));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(filePath).getLen());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * mkdirs under an existing file (at any depth) must fail with IOException
 * and must not create the requested directory.
 */
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
  Path hadoopDir = getTestRootPath(fSys, "test/hadoop");
  Assert.assertFalse(exists(fSys, hadoopDir));
  fSys.mkdirs(hadoopDir);
  Assert.assertTrue(exists(fSys, hadoopDir));
  createFile(getTestRootPath(fSys, "test/hadoop/file"));
  Path shallowSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
  try {
    fSys.mkdirs(shallowSubDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected: immediate parent is a file
  }
  Assert.assertFalse(exists(fSys, shallowSubDir));
  Path deepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
  try {
    fSys.mkdirs(deepSubDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected: an ancestor is a file
  }
  Assert.assertFalse(exists(fSys, deepSubDir));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises setWorkingDirectory/getWorkingDirectory with absolute paths,
 * "." (no-op), ".." (moves to parent), a relative existing directory
 * (resolved against the current working dir), and finally verifies that
 * open/mkdirs with relative paths resolve against the working directory.
 * The statement order is significant: each assertion depends on the
 * working-directory state established by the preceding calls.
 */
@Test public void testWorkingDirectory() throws Exception { Path workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test")); fSys.setWorkingDirectory(workDir); Assert.assertEquals(workDir,fSys.getWorkingDirectory()); fSys.setWorkingDirectory(new Path(".")); Assert.assertEquals(workDir,fSys.getWorkingDirectory()); fSys.setWorkingDirectory(new Path("..")); Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory()); workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test")); fSys.setWorkingDirectory(workDir); Assert.assertEquals(workDir,fSys.getWorkingDirectory()); Path relativeDir=new Path("existingDir1"); Path absoluteDir=new Path(workDir,"existingDir1"); fSys.mkdirs(absoluteDir); fSys.setWorkingDirectory(relativeDir); Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory()); absoluteDir=getTestRootPath(fSys,"test/existingDir2"); fSys.mkdirs(absoluteDir); fSys.setWorkingDirectory(absoluteDir); Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory()); Path absolutePath=new Path(absoluteDir,"foo"); createFile(fSys,absolutePath); fSys.open(new Path("foo")).close(); fSys.mkdirs(new Path("newDir")); Assert.assertTrue(isDir(fSys,new Path(absoluteDir,"newDir"))); }

UtilityVerifier BooleanVerifier HybridVerifier 
/** A directory must not be renamable onto itself, with or without OVERWRITE. */
@Test
public void testRenameDirectoryToItself() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path dir = getTestRootPath(fSys, "test/hadoop/dir");
  fSys.mkdirs(dir);
  try {
    rename(dir, dir, false, true, false, Rename.NONE);
    Assert.fail("Renamed directory to itself");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  try {
    rename(dir, dir, false, true, false, Rename.OVERWRITE);
    Assert.fail("Renamed directory to itself");
  } catch (IOException e) {
    // expected: self-rename fails even with OVERWRITE
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/a??" with the trivial DEFAULT_FILTER must return all
 * three distinct matching directories (AAA, AXA, AXX).
 */
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdirs is idempotent so the duplicate is harmless.
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Globbing paths that match nothing: a literal non-existent path yields
 * null, while wildcard patterns yield an empty (non-null) array.
 */
@Test
public void testGlobStatusNonExistentFile() throws Exception {
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf"));
  Assert.assertNull(matches);
  matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, matches.length);
  matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, matches.length);
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a non-empty directory onto an existing empty directory must fail
 * without OVERWRITE and succeed with it.
 */
@Test
public void testRenameDirectoryAsEmptyDirectory() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path srcDir = getTestRootPath(fSys, "test/hadoop/dir");
  fSys.mkdirs(srcDir);
  createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
  createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
  Path dstDir = getTestRootPath(fSys, "test/new/newdir");
  fSys.mkdirs(dstDir);
  try {
    rename(srcDir, dstDir, false, true, false, Rename.NONE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // With OVERWRITE the empty destination may be replaced.
  rename(srcDir, dstDir, true, false, true, Rename.OVERWRITE);
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file into a directory that does not exist must fail with
 * FileNotFoundException, both with and without OVERWRITE.
 */
@Test
public void testRenameFileToNonExistentDirectory() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path srcFile = getTestRootPath(fSys, "test/hadoop/file");
  createFile(srcFile);
  Path dstFile = getTestRootPath(fSys, "test/nonExistent/newfile");
  try {
    rename(srcFile, dstFile, false, true, false, Rename.NONE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
  try {
    rename(srcFile, dstFile, false, true, false, Rename.OVERWRITE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** A file must not be renamable onto itself, with or without OVERWRITE. */
@Test
public void testRenameFileToItself() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path file = getTestRootPath(fSys, "test/hadoop/file");
  createFile(file);
  try {
    rename(file, file, false, true, false, Rename.NONE);
    Assert.fail("Renamed file to itself");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  try {
    rename(file, file, false, true, false, Rename.OVERWRITE);
    Assert.fail("Renamed file to itself");
  } catch (IOException e) {
    // expected: self-rename fails even with OVERWRITE
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Creating a file implicitly creates its missing parent directories. */
@Test
public void testWriteInNonExistentDirectory() throws IOException {
  Path filePath = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fSys, filePath.getParent()));
  createFile(filePath);
  Assert.assertTrue("Exists", exists(fSys, filePath));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(filePath).getLen());
  Assert.assertTrue("Parent exists", exists(fSys, filePath.getParent()));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a non-existent source must fail with FileNotFoundException,
 * both with and without the OVERWRITE option.
 */
@Test
public void testRenameNonExistentPath() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path src = getTestRootPath(fSys, "test/hadoop/nonExistent");
  Path dst = getTestRootPath(fSys, "test/new/newpath");
  try {
    rename(src, dst, false, false, false, Rename.NONE);
    Assert.fail("Should throw FileNotFoundException");
  } catch (IOException e) {
    // Replaced leftover "XXX" debug marker with a meaningful log message.
    Log.info("Rename of non-existent path failed as expected", e);
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
  try {
    rename(src, dst, false, false, false, Rename.OVERWRITE);
    Assert.fail("Should throw FileNotFoundException");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Non-recursive delete of a non-empty directory must fail and leave the
 * tree intact; recursive delete must remove the whole tree.
 */
@Test
public void testDeleteRecursively() throws IOException {
  Path parent = getTestRootPath(fSys, "test/hadoop");
  Path childFile = getTestRootPath(fSys, "test/hadoop/file");
  Path childDir = getTestRootPath(fSys, "test/hadoop/subdir");
  createFile(childFile);
  fSys.mkdirs(childDir);
  Assert.assertTrue("File exists", exists(fSys, childFile));
  Assert.assertTrue("Dir exists", exists(fSys, parent));
  Assert.assertTrue("Subdir exists", exists(fSys, childDir));
  try {
    fSys.delete(parent, false);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected: directory is not empty
  }
  Assert.assertTrue("File still exists", exists(fSys, childFile));
  Assert.assertTrue("Dir still exists", exists(fSys, parent));
  Assert.assertTrue("Subdir still exists", exists(fSys, childDir));
  Assert.assertTrue("Deleted", fSys.delete(parent, true));
  Assert.assertFalse("File doesn't exist", exists(fSys, childFile));
  Assert.assertFalse("Dir doesn't exist", exists(fSys, parent));
  Assert.assertFalse("Subdir doesn't exist", exists(fSys, childDir));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/*" combined with TEST_X_FILTER must return exactly the
 * two directories whose names contain an 'x' (AXA and AXX).
 */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdirs is idempotent so the duplicate is harmless.
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * listStatus must report exactly the immediate children of a directory:
 * one child under "test", three under "test/hadoop", none under a leaf.
 */
@Test
public void testListStatus() throws Exception {
  Path[] fixtureDirs = {
      getTestRootPath(fSys, "test/hadoop/a"),
      getTestRootPath(fSys, "test/hadoop/b"),
      getTestRootPath(fSys, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fSys, fixtureDirs[0]));
  for (Path dir : fixtureDirs) {
    fSys.mkdirs(dir);
  }
  FileStatus[] statuses = fSys.listStatus(getTestRootPath(fSys, "test"));
  Assert.assertEquals(1, statuses.length);
  Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), statuses[0].getPath());
  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
  Assert.assertEquals(3, statuses.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/a"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/b"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/c"), statuses));
  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
  Assert.assertEquals(0, statuses.length);
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** getStatus(null) must return non-null, non-negative usage figures. */
@Test
public void testFsStatus() throws Exception {
  FsStatus status = fSys.getStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getUsed() >= 0);
  Assert.assertTrue(status.getRemaining() >= 0);
  Assert.assertTrue(status.getCapacity() >= 0);
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto an existing NON-empty directory must fail both
 * without and with OVERWRITE (a non-empty destination cannot be replaced).
 */
@Test
public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path srcDir = getTestRootPath(fSys, "test/hadoop/dir");
  fSys.mkdirs(srcDir);
  createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
  createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
  Path dstDir = getTestRootPath(fSys, "test/new/newdir");
  fSys.mkdirs(dstDir);
  createFile(getTestRootPath(fSys, "test/new/newdir/file1"));
  try {
    rename(srcDir, dstDir, false, true, false, Rename.NONE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  try {
    rename(srcDir, dstDir, false, true, false, Rename.OVERWRITE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException ex) {
    // expected: non-empty destination cannot be overwritten
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/*" with the trivial DEFAULT_FILTER must return all
 * three distinct fixture directories (AAA, AXA, AXX).
 */
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdirs is idempotent so the duplicate is harmless.
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, filteredPaths.length);
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/ax?" must match exactly the two directories starting
 * with "ax" (AXA and AXX).
 */
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] paths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/ax?"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), paths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop*{@literal /}*" must match every child of both hadoop
 * directories — all four fixture directories.
 */
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] paths = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*/*"));
  Assert.assertEquals(4, paths.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), paths));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA2), paths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * listStatus with TEST_X_FILTER must keep only the two directories whose
 * names contain an 'x' (AXA and AXX).
 */
@Test
public void testListStatusFilterWithSomeMatches() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fSys, testDirs[0])) {
    for (Path path : testDirs) {
      fSys.mkdirs(path);
    }
  }
  FileStatus[] filteredPaths =
      fSys.listStatus(getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(
      containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), filteredPaths));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file onto an existing file must fail without OVERWRITE and
 * succeed with it.
 */
@Test
public void testRenameFileAsExistingFile() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path srcFile = getTestRootPath(fSys, "test/hadoop/file");
  createFile(srcFile);
  Path dstFile = getTestRootPath(fSys, "test/new/existingFile");
  createFile(dstFile);
  try {
    rename(srcFile, dstFile, false, true, false, Rename.NONE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // With OVERWRITE the existing destination file may be replaced.
  rename(srcFile, dstFile, true, false, true, Rename.OVERWRITE);
}

Class: org.apache.hadoop.fs.FileContextMainOperationsBaseTest

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * Creating an already-existing file with only the CREATE flag must fail
 * with FileAlreadyExistsException.
 */
@Test(expected = FileAlreadyExistsException.class)
public void testCreateFlagCreateExistingFile() throws IOException {
  Path p = getTestRootPath(fc, "test/testCreateFlagCreateExistingFile");
  createFile(p);
  fc.create(p, EnumSet.of(CREATE));
  Assert.fail("Expected exception not thrown"); // typo fix: "Excepted" -> "Expected"
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop*" must match only the two directory names themselves
 * ("test/hadoop" and "test/hadoop2"), not their children.
 */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] paths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*"));
  Assert.assertEquals(2, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop2"), paths));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a non-existent source must fail with FileNotFoundException,
 * both with and without OVERWRITE.
 */
@Test
public void testRenameNonExistentPath() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path missingSrc = getTestRootPath(fc, "test/hadoop/nonExistent");
  Path target = getTestRootPath(fc, "test/new/newpath");
  try {
    rename(missingSrc, target, false, false, false, Rename.NONE);
    Assert.fail("Should throw FileNotFoundException");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
  try {
    rename(missingSrc, target, false, false, false, Rename.OVERWRITE);
    Assert.fail("Should throw FileNotFoundException");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
}

UtilityVerifier ExceptionVerifier HybridVerifier 
/** Creating with an empty flag set must fail with HadoopIllegalArgumentException. */
@Test(expected = HadoopIllegalArgumentException.class)
public void testEmptyCreateFlag() throws IOException {
  Path p = getTestRootPath(fc, "test/file");
  fc.create(p, EnumSet.noneOf(CreateFlag.class));
  Assert.fail("Expected exception not thrown"); // typo fix: "Excepted" -> "Expected"
}

UtilityVerifier ExceptionVerifier HybridVerifier 
/** APPEND combined with OVERWRITE is contradictory and must be rejected. */
@Test(expected = HadoopIllegalArgumentException.class)
public void testCreateFlagAppendOverwrite() throws IOException {
  Path p = getTestRootPath(fc, "test/nonExistent");
  fc.create(p, EnumSet.of(APPEND, OVERWRITE));
  Assert.fail("Expected exception not thrown"); // typo fix: "Excepted" -> "Expected"
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/*" combined with TEST_X_FILTER must return exactly the
 * two directories whose names contain an 'x' (AXA and AXX).
 */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdir with createParent is idempotent so it's harmless.
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}

UtilityVerifier ExceptionVerifier HybridVerifier 
/** CREATE+APPEND+OVERWRITE together are contradictory and must be rejected. */
@Test(expected = HadoopIllegalArgumentException.class)
public void testCreateFlagAppendCreateOverwrite() throws IOException {
  Path p = getTestRootPath(fc, "test/nonExistent");
  fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE));
  Assert.fail("Expected exception not thrown"); // typo fix: "Excepted" -> "Expected"
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop*{@literal /}*" must match every child of both hadoop
 * directories — all four fixture directories.
 */
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] paths = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*/*"));
  Assert.assertEquals(4, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA2), paths));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Glob "test/hadoop/a??" combined with TEST_X_FILTER must return exactly
 * the two directories whose names contain an 'x' (AXA and AXX).
 */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
    throws Exception {
  // NOTE(review): TEST_DIR_AXX appears twice; possibly TEST_DIR_AAA2 was
  // intended — mkdir with createParent is idempotent so it's harmless.
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  // Create the fixture directories only if a previous test has not already.
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] filteredPaths =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, filteredPaths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), filteredPaths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), filteredPaths));
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * On file systems without symlink support, createSymlink and getLinkTarget
 * must fail, and getFileLinkStatus must behave like getFileStatus.
 * On link-capable file systems this test is a no-op.
 */
@Test
public void testUnsupportedSymlink() throws IOException {
  Path target = getTestRootPath(fc, "file");
  Path link = getTestRootPath(fc, "linkToFile");
  if (fc.getDefaultFileSystem().supportsSymlinks()) {
    return; // nothing to verify on link-capable file systems
  }
  try {
    fc.createSymlink(target, link, false);
    Assert.fail("Created a symlink on a file system that does not support symlinks.");
  } catch (IOException e) {
    // expected
  }
  createFile(target);
  try {
    fc.getLinkTarget(target);
    Assert.fail("Got a link target on a file system that does not support symlinks.");
  } catch (IOException e) {
    // expected
  }
  Assert.assertEquals(fc.getFileStatus(target), fc.getFileLinkStatus(target));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file into a directory that does not exist must fail with
 * FileNotFoundException, both with and without OVERWRITE.
 */
@Test
public void testRenameFileToNonExistentDirectory() throws Exception {
  if (!renameSupported()) {
    return;
  }
  Path srcFile = getTestRootPath(fc, "test/hadoop/file");
  createFile(srcFile);
  Path dstFile = getTestRootPath(fc, "test/nonExistent/newfile");
  try {
    rename(srcFile, dstFile, false, true, false, Rename.NONE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
  try {
    rename(srcFile, dstFile, false, true, false, Rename.OVERWRITE);
    Assert.fail("Expected exception was not thrown");
  } catch (IOException e) {
    Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Non-recursive delete of a non-empty directory must fail and leave the
 * tree intact; recursive delete must remove the whole tree.
 */
@Test
public void testDeleteRecursively() throws IOException {
  Path parent = getTestRootPath(fc, "test/hadoop");
  Path childFile = getTestRootPath(fc, "test/hadoop/file");
  Path childDir = getTestRootPath(fc, "test/hadoop/subdir");
  createFile(childFile);
  fc.mkdir(childDir, FsPermission.getDefault(), true);
  Assert.assertTrue("File exists", exists(fc, childFile));
  Assert.assertTrue("Dir exists", exists(fc, parent));
  Assert.assertTrue("Subdir exists", exists(fc, childDir));
  try {
    fc.delete(parent, false);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected: directory is not empty
  }
  Assert.assertTrue("File still exists", exists(fc, childFile));
  Assert.assertTrue("Dir still exists", exists(fc, parent));
  Assert.assertTrue("Subdir still exists", exists(fc, childDir));
  Assert.assertTrue("Deleted", fc.delete(parent, true));
  Assert.assertFalse("File doesn't exist", exists(fc, childFile));
  Assert.assertFalse("Dir doesn't exist", exists(fc, parent));
  Assert.assertFalse("Subdir doesn't exist", exists(fc, childDir));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * globStatus("test/hadoop/*") with the trivial DEFAULT_FILTER must return
 * exactly the three distinct directories AAA, AXA, AXX.
 * NOTE(review): testDirs lists TEST_DIR_AXX twice; mkdir of an existing
 * dir is harmless here, but presumably a fourth distinct directory was
 * intended — confirm against the other glob tests in this suite.
 */
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception { Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AXX)}; if (exists(fc,testDirs[0]) == false) { for ( Path path : testDirs) { fc.mkdir(path,FsPermission.getDefault(),true); } } FileStatus[] filteredPaths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/*"),DEFAULT_FILTER); Assert.assertEquals(3,filteredPaths.length); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),filteredPaths)); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),filteredPaths)); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),filteredPaths)); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto an existing non-empty directory must fail:
 * with FileAlreadyExistsException for Rename.NONE, and with some
 * IOException even for Rename.OVERWRITE (the destination is not empty).
 */
@Test public void testRenameDirectoryAsNonEmptyDirectory() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/dir"); fc.mkdir(src,FileContext.DEFAULT_PERM,true); createFile(getTestRootPath(fc,"test/hadoop/dir/file1")); createFile(getTestRootPath(fc,"test/hadoop/dir/subdir/file2")); Path dst=getTestRootPath(fc,"test/new/newdir"); fc.mkdir(dst,FileContext.DEFAULT_PERM,true); createFile(getTestRootPath(fc,"test/new/newdir/file1")); try { rename(src,dst,false,true,false,Rename.NONE); Assert.fail("Expected exception was not thrown"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } try { rename(src,dst,false,true,false,Rename.OVERWRITE); Assert.fail("Expected exception was not thrown"); } catch ( IOException ex) { } }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto an existing empty directory must fail with
 * FileAlreadyExistsException for Rename.NONE but succeed with
 * Rename.OVERWRITE.
 */
@Test public void testRenameDirectoryAsEmptyDirectory() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/dir"); fc.mkdir(src,FileContext.DEFAULT_PERM,true); createFile(getTestRootPath(fc,"test/hadoop/dir/file1")); createFile(getTestRootPath(fc,"test/hadoop/dir/subdir/file2")); Path dst=getTestRootPath(fc,"test/new/newdir"); fc.mkdir(dst,FileContext.DEFAULT_PERM,true); try { rename(src,dst,false,true,false,Rename.NONE); Assert.fail("Expected exception was not thrown"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } rename(src,dst,true,false,true,Rename.OVERWRITE); }

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * Creating with only the APPEND flag on a file that does not exist must
 * throw {@link FileNotFoundException}. The {@code expected} attribute of
 * {@code @Test} fails the test if no exception is thrown, so no explicit
 * {@code Assert.fail} is needed (the one previously here was unreachable
 * on the failure path's intended report and carried a typo).
 */
@Test(expected=FileNotFoundException.class)
public void testCreateFlagAppendNonExistingFile() throws IOException {
  Path p = getTestRootPath(fc, "test/testCreateFlagAppendNonExistingFile");
  // Must throw: APPEND without CREATE cannot target a missing file.
  fc.create(p, EnumSet.of(APPEND));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a file whose parent directory is missing must implicitly
 * create that parent directory.
 */
@Test
public void testWriteInNonExistentDirectory() throws IOException {
  final Path file = getTestRootPath(fc, "test/hadoop/file");
  final Path parent = file.getParent();
  Assert.assertFalse("Parent doesn't exist", exists(fc, parent));

  createFile(file);

  Assert.assertTrue("Exists", exists(fc, file));
  Assert.assertEquals("Length", data.length, fc.getFileStatus(file).getLen());
  Assert.assertTrue("Parent exists", exists(fc, parent));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory to itself must fail with
 * FileAlreadyExistsException for both Rename.NONE and Rename.OVERWRITE.
 */
@Test public void testRenameDirectoryToItself() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/dir"); fc.mkdir(src,FileContext.DEFAULT_PERM,true); try { rename(src,src,false,true,false,Rename.NONE); Assert.fail("Renamed directory to itself"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } try { rename(src,src,false,true,false,Rename.OVERWRITE); Assert.fail("Renamed directory to itself"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * globStatus contract for non-existent inputs: a literal path that does
 * not exist yields null, while wildcard patterns that match nothing
 * yield an empty (non-null) array.
 */
@Test public void testGlobStatusNonExistentFile() throws Exception { FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf")); Assert.assertNull(paths); paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf/?")); Assert.assertEquals(0,paths.length); paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf/xyz*/?")); Assert.assertEquals(0,paths.length); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * globStatus("test/hadoop/a??") with the trivial DEFAULT_FILTER must
 * return exactly AAA, AXA and AXX.
 * NOTE(review): TEST_DIR_AXX appears twice in testDirs; the duplicate
 * mkdir is harmless but looks unintended — confirm.
 */
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception { Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AXX)}; if (exists(fc,testDirs[0]) == false) { for ( Path path : testDirs) { fc.mkdir(path,FsPermission.getDefault(),true); } } FileStatus[] filteredPaths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/a??"),DEFAULT_FILTER); Assert.assertEquals(3,filteredPaths.length); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),filteredPaths)); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),filteredPaths)); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),filteredPaths)); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory to a destination whose parent does not exist must
 * fail with FileNotFoundException for both NONE and OVERWRITE options.
 */
@Test public void testRenameDirectoryToNonExistentParent() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/dir"); fc.mkdir(src,FileContext.DEFAULT_PERM,true); Path dst=getTestRootPath(fc,"test/nonExistent/newdir"); try { rename(src,dst,false,true,false,Rename.NONE); Assert.fail("Expected exception was not thrown"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException); } try { rename(src,dst,false,true,false,Rename.OVERWRITE); Assert.fail("Expected exception was not thrown"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Enabling checksum verification on an open output path must not corrupt
 * the write: the file's length and full contents read back must match
 * the data written. Streams are closed in finally blocks to avoid leaks
 * on assertion failure.
 */
@Test public void testSetVerifyChecksum() throws IOException { final Path rootPath=getTestRootPath(fc,"test"); final Path path=new Path(rootPath,"zoo"); FSDataOutputStream out=fc.create(path,EnumSet.of(CREATE),Options.CreateOpts.createParent()); try { fc.setVerifyChecksum(true,path); out.write(data,0,data.length); } finally { out.close(); } FileStatus fileStatus=fc.getFileStatus(path); final long len=fileStatus.getLen(); assertTrue(len == data.length); byte[] bb=new byte[(int)len]; FSDataInputStream fsdis=fc.open(path); try { fsdis.read(bb); } finally { fsdis.close(); } assertArrayEquals(data,bb); }

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * Creating with only the OVERWRITE flag on a missing file must throw
 * {@link FileNotFoundException}. The {@code expected} attribute of
 * {@code @Test} already fails the test when no exception is thrown, so
 * the misspelled trailing {@code Assert.fail("Excepted ...")} was
 * redundant and has been removed.
 */
@Test(expected=FileNotFoundException.class)
public void testCreateFlagOverwriteNonExistingFile() throws IOException {
  Path p = getTestRootPath(fc, "test/testCreateFlagOverwriteNonExistingFile");
  // Must throw: OVERWRITE without CREATE cannot target a missing file.
  fc.create(p, EnumSet.of(OVERWRITE));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file to itself must fail with FileAlreadyExistsException
 * for both Rename.NONE and Rename.OVERWRITE.
 */
@Test public void testRenameFileToItself() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/file"); createFile(src); try { rename(src,src,false,true,false,Rename.NONE); Assert.fail("Renamed file to itself"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } try { rename(src,src,false,true,false,Rename.OVERWRITE); Assert.fail("Renamed file to itself"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * globStatus("test/hadoop/ax?") must match exactly the two directories
 * AXA and AXX; AAA and AAA2 must not match the single-char wildcard.
 */
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception { Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)}; if (exists(fc,testDirs[0]) == false) { for ( Path path : testDirs) { fc.mkdir(path,FsPermission.getDefault(),true); } } FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/ax?")); Assert.assertEquals(2,paths.length); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),paths)); Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),paths)); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises working-directory semantics end to end: setting an absolute
 * working dir; "." (no-op) and ".." (parent) navigation; resolving a
 * relative dir against the current working dir; creating/opening files by
 * relative path; and the failure cases — cd to a non-existent path and cd
 * to a regular file must both throw. Statement order matters: each
 * assertion depends on the working directory left by the previous step.
 */
@Test public void testWorkingDirectory() throws Exception { Path workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test")); fc.setWorkingDirectory(workDir); Assert.assertEquals(workDir,fc.getWorkingDirectory()); fc.setWorkingDirectory(new Path(".")); Assert.assertEquals(workDir,fc.getWorkingDirectory()); fc.setWorkingDirectory(new Path("..")); Assert.assertEquals(workDir.getParent(),fc.getWorkingDirectory()); workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test")); fc.setWorkingDirectory(workDir); Assert.assertEquals(workDir,fc.getWorkingDirectory()); Path relativeDir=new Path("existingDir1"); Path absoluteDir=new Path(workDir,"existingDir1"); fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true); fc.setWorkingDirectory(relativeDir); Assert.assertEquals(absoluteDir,fc.getWorkingDirectory()); absoluteDir=getTestRootPath(fc,"test/existingDir2"); fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true); fc.setWorkingDirectory(absoluteDir); Assert.assertEquals(absoluteDir,fc.getWorkingDirectory()); Path absolutePath=new Path(absoluteDir,"foo"); fc.create(absolutePath,EnumSet.of(CREATE)).close(); fc.open(new Path("foo")).close(); fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true); Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir"))); absoluteDir=getTestRootPath(fc,"nonexistingPath"); try { fc.setWorkingDirectory(absoluteDir); Assert.fail("cd to non existing dir should have failed"); } catch ( Exception e) { } absoluteDir=new Path(localFsRootPath,"existingDir"); fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true); fc.setWorkingDirectory(absoluteDir); Assert.assertEquals(absoluteDir,fc.getWorkingDirectory()); Path aRegularFile=new Path("aRegularFile"); createFile(aRegularFile); try { fc.setWorkingDirectory(aRegularFile); fail("An IOException expected."); } catch ( IOException ioe) { } }

TestCleaner InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Removes the per-test root directory and the local-FS root after each
 * test. The first delete is asserted; the result of the second
 * (localFsRootPath) delete is deliberately ignored — it may not exist
 * for every test.
 */
@After public void tearDown() throws Exception { boolean del=fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test")),true); assertTrue(del); fc.delete(localFsRootPath,true); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * mkdir must refuse to create a directory (direct child or deeper
 * descendant) under a path that is an existing regular file, throwing
 * IOException and leaving nothing behind.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception { Path testDir=getTestRootPath(fc,"test/hadoop"); Assert.assertFalse(exists(fc,testDir)); fc.mkdir(testDir,FsPermission.getDefault(),true); Assert.assertTrue(exists(fc,testDir)); createFile(getTestRootPath(fc,"test/hadoop/file")); Path testSubDir=getTestRootPath(fc,"test/hadoop/file/subdir"); try { fc.mkdir(testSubDir,FsPermission.getDefault(),true); Assert.fail("Should throw IOException."); } catch ( IOException e) { } Assert.assertFalse(exists(fc,testSubDir)); Path testDeepSubDir=getTestRootPath(fc,"test/hadoop/file/deep/sub/dir"); try { fc.mkdir(testDeepSubDir,FsPermission.getDefault(),true); Assert.fail("Should throw IOException."); } catch ( IOException e) { } Assert.assertFalse(exists(fc,testDeepSubDir)); }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * getFsStatus(null) reports on the default file system; the status
 * object must be non-null and all space figures non-negative.
 */
@Test
public void testFsStatus() throws Exception {
  final FsStatus status = fc.getFsStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(0 <= status.getUsed());
  Assert.assertTrue(0 <= status.getRemaining());
  Assert.assertTrue(0 <= status.getCapacity());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies listStatus in both its array form (fc.util().listStatus) and
 * its RemoteIterator form (fc.listStatus): a parent with one child, a
 * directory with three children (order-independent containment checks),
 * and an empty directory; both forms must agree.
 */
@Test public void testListStatus() throws Exception { Path[] testDirs={getTestRootPath(fc,"test/hadoop/a"),getTestRootPath(fc,"test/hadoop/b"),getTestRootPath(fc,"test/hadoop/c/1")}; Assert.assertFalse(exists(fc,testDirs[0])); for ( Path path : testDirs) { fc.mkdir(path,FsPermission.getDefault(),true); } FileStatus[] paths=fc.util().listStatus(getTestRootPath(fc,"test")); Assert.assertEquals(1,paths.length); Assert.assertEquals(getTestRootPath(fc,"test/hadoop"),paths[0].getPath()); paths=fc.util().listStatus(getTestRootPath(fc,"test/hadoop")); Assert.assertEquals(3,paths.length); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/a"),paths)); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/b"),paths)); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/c"),paths)); paths=fc.util().listStatus(getTestRootPath(fc,"test/hadoop/a")); Assert.assertEquals(0,paths.length); RemoteIterator pathsIterator=fc.listStatus(getTestRootPath(fc,"test")); Assert.assertEquals(getTestRootPath(fc,"test/hadoop"),pathsIterator.next().getPath()); Assert.assertFalse(pathsIterator.hasNext()); pathsIterator=fc.listStatus(getTestRootPath(fc,"test/hadoop")); FileStatus[] subdirs=new FileStatus[3]; int i=0; while (i < 3 && pathsIterator.hasNext()) { subdirs[i++]=pathsIterator.next(); } Assert.assertFalse(pathsIterator.hasNext()); Assert.assertTrue(i == 3); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/a"),subdirs)); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/b"),subdirs)); Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/c"),subdirs)); pathsIterator=fc.listStatus(getTestRootPath(fc,"test/hadoop/a")); Assert.assertFalse(pathsIterator.hasNext()); }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * listCorruptFileBlocks contract, branching on capability: when
 * supported, the iterator must be non-null, drain cleanly, and then
 * throw NoSuchElementException on a further next(); when unsupported,
 * the call must throw UnsupportedOperationException (and conversely an
 * UnsupportedOperationException on a supporting FS is a failure).
 */
@Test public void testListCorruptFileBlocks() throws IOException { final Path rootPath=getTestRootPath(fc,"test"); final Path path=new Path(rootPath,"zoo"); createFile(path); try { final RemoteIterator remoteIterator=fc.listCorruptFileBlocks(path); if (listCorruptedBlocksSupported()) { assertTrue(remoteIterator != null); Path p; while (remoteIterator.hasNext()) { p=remoteIterator.next(); System.out.println("corrupted block: " + p); } try { remoteIterator.next(); fail(); } catch ( NoSuchElementException nsee) { } } else { fail(); } } catch ( UnsupportedOperationException uoe) { if (listCorruptedBlocksSupported()) { fail(uoe.toString()); } else { } } }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file onto an existing file must fail with
 * FileAlreadyExistsException under Rename.NONE and succeed under
 * Rename.OVERWRITE.
 */
@Test public void testRenameFileAsExistingFile() throws Exception { if (!renameSupported()) return; Path src=getTestRootPath(fc,"test/hadoop/file"); createFile(src); Path dst=getTestRootPath(fc,"test/new/existingFile"); createFile(dst); try { rename(src,dst,false,true,false,Rename.NONE); Assert.fail("Expected exception was not thrown"); } catch ( IOException e) { Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } rename(src,dst,true,false,true,Rename.OVERWRITE); }

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * {@code create} must reject a null createFlag set with
 * {@link HadoopIllegalArgumentException}. The {@code expected} attribute
 * of {@code @Test} already fails the test if no exception is thrown, so
 * the misspelled trailing {@code Assert.fail("Excepted ...")} was
 * redundant and has been removed.
 */
@Test(expected=HadoopIllegalArgumentException.class)
public void testNullCreateFlag() throws IOException {
  Path p = getTestRootPath(fc, "test/file");
  // Must throw: a null flag set is an illegal argument.
  fc.create(p, null);
}

Class: org.apache.hadoop.fs.FileContextPermissionBase

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * setOwner with a null user must change only the group. Skips on
 * Windows, when the group list cannot be read, or when the user belongs
 * to no groups; changing to a second group is only exercised when the
 * user has more than one. setOwner(f, null, null) must throw
 * IllegalArgumentException. The test file is cleaned up in finally.
 */
@Test public void testSetOwner() throws IOException { if (Path.WINDOWS) { System.out.println("Cannot run test for Windows"); return; } String filename="bar"; Path f=fileContextTestHelper.getTestRootPath(fc,filename); createFile(fc,f); List groups=null; try { groups=getGroups(); System.out.println(filename + ": " + fc.getFileStatus(f).getPermission()); } catch ( IOException e) { System.out.println(StringUtils.stringifyException(e)); System.out.println("Cannot run test"); return; } if (groups == null || groups.size() < 1) { System.out.println("Cannot run test: need at least one group. groups=" + groups); return; } try { String g0=groups.get(0); fc.setOwner(f,null,g0); Assert.assertEquals(g0,fc.getFileStatus(f).getGroup()); if (groups.size() > 1) { String g1=groups.get(1); fc.setOwner(f,null,g1); Assert.assertEquals(g1,fc.getFileStatus(f).getGroup()); } else { System.out.println("Not testing changing the group since user " + "belongs to only one group."); } try { fc.setOwner(f,null,null); fail("Exception expected."); } catch ( IllegalArgumentException iae) { } } finally { cleanupFile(fc,f); } }

Class: org.apache.hadoop.fs.FileContextURIBase

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Building a path from a null name (and creating a file at it) must be
 * rejected with IllegalArgumentException.
 */
@Test
public void testCreateFileWithNullName() throws IOException {
  final String fileName = null;
  try {
    final Path nullNamePath = qualifiedPath(fileName, fc2);
    Assert.assertFalse(exists(fc2, nullNamePath));
    createFile(fc1, nullNamePath);
    Assert.fail("Create file with null name should throw IllegalArgumentException.");
  } catch (IllegalArgumentException expected) {
    // a null file name must be rejected
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * getFsStatus on a qualified path from a second FileContext must return
 * a non-null status with strictly positive capacity, remaining space and
 * used space (a file was just written).
 */
@Test
public void testFileStatus() throws IOException {
  final String fileName = "file1";
  final Path qualified = fc2.makeQualified(new Path(BASE, fileName));
  createFile(fc1, qualified);

  final FsStatus status = fc2.getFsStatus(qualified);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getCapacity() > 0);
  Assert.assertTrue(status.getRemaining() > 0);
  Assert.assertTrue(status.getUsed() > 0);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a file in a non-existent directory must implicitly create
 * that directory; verified through a second FileContext.
 */
@Test
public void testCreateFileInNonExistingDirectory() throws IOException {
  final String fileName = "testDir/testFile";
  final Path testPath = qualifiedPath(fileName, fc2);
  Assert.assertFalse(exists(fc2, testPath));

  createFile(fc1, testPath);

  final Path parent = testPath.getParent();
  Assert.assertTrue(isDir(fc2, parent));
  Assert.assertEquals("testDir", parent.getName());
  Assert.assertTrue(exists(fc2, testPath));
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Creating a file that already exists must throw IOException and leave
 * the original file in place; exercised across two FileContexts.
 */
@Test
public void testCreateExistingFile() throws IOException {
  final String fileName = "testFile";
  final Path testPath = qualifiedPath(fileName, fc2);
  Assert.assertFalse(exists(fc2, testPath));
  createFile(fc1, testPath);
  try {
    createFile(fc2, testPath);
    Assert.fail("Create existing file should throw an IOException.");
  } catch (IOException expected) {
    // re-creating an existing file must fail
  }
  Assert.assertTrue(exists(fc2, testPath));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Cross-context listStatus with awkward directory names (including
 * '#@#@' and '&*#$#$@234', filtered by platform testability): the array
 * form and the RemoteIterator form must both report every created
 * directory exactly, and an empty directory must list as empty.
 */
@Test public void testListStatus() throws Exception { final String hPrefix="test/hadoop"; final String[] dirs={hPrefix + "/a",hPrefix + "/b",hPrefix + "/c",hPrefix + "/1",hPrefix + "/#@#@",hPrefix + "/&*#$#$@234"}; ArrayList testDirs=new ArrayList(); for ( String d : dirs) { if (!isTestableFileNameOnPlatform(d)) { continue; } testDirs.add(qualifiedPath(d,fc2)); } Assert.assertFalse(exists(fc1,testDirs.get(0))); for ( Path path : testDirs) { fc1.mkdir(path,FsPermission.getDefault(),true); } FileStatus[] paths=fc1.util().listStatus(qualifiedPath("test",fc1)); Assert.assertEquals(1,paths.length); Assert.assertEquals(qualifiedPath(hPrefix,fc1),paths[0].getPath()); paths=fc1.util().listStatus(qualifiedPath(hPrefix,fc1)); Assert.assertEquals(testDirs.size(),paths.length); for (int i=0; i < testDirs.size(); i++) { boolean found=false; for (int j=0; j < paths.length; j++) { if (qualifiedPath(testDirs.get(i).toString(),fc1).equals(paths[j].getPath())) { found=true; } } Assert.assertTrue(testDirs.get(i) + " not found",found); } paths=fc1.util().listStatus(qualifiedPath(dirs[0],fc1)); Assert.assertEquals(0,paths.length); RemoteIterator pathsItor=fc1.listStatus(qualifiedPath("test",fc1)); Assert.assertEquals(qualifiedPath(hPrefix,fc1),pathsItor.next().getPath()); Assert.assertFalse(pathsItor.hasNext()); pathsItor=fc1.listStatus(qualifiedPath(hPrefix,fc1)); int dirLen=0; for (; pathsItor.hasNext(); dirLen++) { boolean found=false; FileStatus stat=pathsItor.next(); for (int j=0; j < dirs.length; j++) { if (qualifiedPath(dirs[j],fc1).equals(stat.getPath())) { found=true; break; } } Assert.assertTrue(stat.getPath() + " not found",found); } Assert.assertEquals(testDirs.size(),dirLen); pathsItor=fc1.listStatus(qualifiedPath(dirs[0],fc1)); Assert.assertFalse(pathsItor.hasNext()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Cross-context variant: mkdir under an existing regular file must throw
 * IOException, for both a direct subdirectory and a deep descendant,
 * regardless of which FileContext issues the call.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception { Path testDir=qualifiedPath("test/hadoop",fc2); Assert.assertFalse(exists(fc2,testDir)); fc2.mkdir(testDir,FsPermission.getDefault(),true); Assert.assertTrue(exists(fc2,testDir)); createFile(fc1,qualifiedPath("test/hadoop/file",fc2)); Path testSubDir=qualifiedPath("test/hadoop/file/subdir",fc2); try { fc1.mkdir(testSubDir,FsPermission.getDefault(),true); Assert.fail("Should throw IOException."); } catch ( IOException e) { } Assert.assertFalse(exists(fc1,testSubDir)); Path testDeepSubDir=qualifiedPath("test/hadoop/file/deep/sub/dir",fc1); try { fc2.mkdir(testDeepSubDir,FsPermission.getDefault(),true); Assert.fail("Should throw IOException."); } catch ( IOException e) { } Assert.assertFalse(exists(fc1,testDeepSubDir)); }

Class: org.apache.hadoop.fs.SymlinkBaseTest

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto a symlink that points to a file must fail
 * (even with OVERWRITE) and leave both the directory and the link
 * intact.
 */
@Test(timeout=10000) public void testRenameDirToSymlinkToFile() throws IOException { Path dir1=new Path(testBaseDir1()); Path file=new Path(testBaseDir2(),"file"); Path linkToFile=new Path(testBaseDir2(),"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(file,linkToFile,false); try { wrapper.rename(dir1,linkToFile,Rename.OVERWRITE); fail("Renamed directory to a symlink"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } assertTrue(wrapper.exists(dir1)); assertTrue(wrapper.exists(linkToFile)); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto the directory it targets must fail with
 * FileAlreadyExistsException for both the plain and OVERWRITE rename,
 * leaving the directory, the link, and the link target unchanged.
 * Skipped for the "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToDirItLinksTo() throws IOException { if ("file".equals(getScheme())) { return; } Path dir=new Path(testBaseDir1(),"dir"); Path link=new Path(testBaseDir1(),"linkToDir"); wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false); wrapper.createSymlink(dir,link,false); try { wrapper.rename(link,dir); fail("Renamed symlink to its target"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } assertTrue(wrapper.isDir(dir)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isSymlink(link)); assertEquals(dir,wrapper.getLinkTarget(link)); try { wrapper.rename(link,dir,Rename.OVERWRITE); fail("Renamed symlink to its target"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } assertTrue(wrapper.isDir(dir)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isSymlink(link)); assertEquals(dir,wrapper.getLinkTarget(link)); }

UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Renaming a symlink must preserve its symlink-ness: the renamed entry
 * is still a link (not a directory), both the link and its target remain
 * readable, and re-creating a file at the new link name must fail.
 * Skipped where symlinks are emulated on Windows.
 */
@Test(timeout=10000) public void testRenameSymlink() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path file=new Path(testBaseDir1(),"file"); Path link1=new Path(testBaseDir1(),"linkToFile1"); Path link2=new Path(testBaseDir1(),"linkToFile2"); createAndWriteFile(file); wrapper.createSymlink(file,link1,false); wrapper.rename(link1,link2); assertTrue(wrapper.getFileLinkStatus(link2).isSymlink()); assertFalse(wrapper.getFileStatus(link2).isDirectory()); readFile(link2); readFile(file); try { createAndWriteFile(link2); fail("link was not renamed"); } catch ( IOException x) { } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto a dangling symlink (target /doesNotExist)
 * must fail even with OVERWRITE; both the directory and the link entry
 * must survive.
 */
@Test(timeout=10000) public void testRenameDirToDanglingSymlink() throws IOException { Path dir=new Path(testBaseDir1()); Path link=new Path(testBaseDir2(),"linkToFile"); wrapper.createSymlink(new Path("/doesNotExist"),link,false); try { wrapper.rename(dir,link,Rename.OVERWRITE); fail("Renamed directory to a symlink"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } assertTrue(wrapper.exists(dir)); assertTrue(wrapper.getFileLinkStatus(link) != null); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink created from fully-qualified paths keeps its fully-qualified
 * target: after renaming the containing directory, the link still points
 * at the old qualified target and is therefore dangling — reading it
 * must throw FileNotFoundException.
 */
@Test(timeout=10000) public void testCreateLinkUsingFullyQualPaths() throws IOException { Path fileAbs=new Path(testBaseDir1(),"file"); Path linkAbs=new Path(testBaseDir1(),"linkToFile"); Path fileQual=new Path(testURI().toString(),fileAbs); Path linkQual=new Path(testURI().toString(),linkAbs); createAndWriteFile(fileAbs); wrapper.createSymlink(fileQual,linkQual,false); checkLink(linkAbs,"file".equals(getScheme()) ? fileAbs : fileQual,fileQual); Path dir1=new Path(testBaseDir1()); Path dir2=new Path(testBaseDir2()); Path linkViaDir2=new Path(testBaseDir2(),"linkToFile"); wrapper.rename(dir1,dir2,Rename.OVERWRITE); assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink()); try { readFile(linkViaDir2); fail("The target should not exist"); } catch ( FileNotFoundException x) { } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * createSymlink's createParent flag: with createParent=false and a
 * missing parent directory the call must fail and create nothing; with
 * createParent=true it must create the parent and yield a readable link.
 */
@Test(timeout=10000) public void testCreateLinkCanCreateParent() throws IOException { Path file=new Path(testBaseDir1() + "/file"); Path link=new Path(testBaseDir2() + "/linkToFile"); createAndWriteFile(file); wrapper.delete(new Path(testBaseDir2()),true); try { wrapper.createSymlink(file,link,false); fail("Created link without first creating parent dir"); } catch ( IOException x) { } assertFalse(wrapper.exists(new Path(testBaseDir2()))); wrapper.createSymlink(file,link,true); readFile(link); }

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * Creating a symlink at a path where one already exists must fail with
 * IOException. Skipped where symlinks are emulated on Windows.
 */
@Test(timeout=10000)
public void testCreateLinkTwice() throws IOException {
  assumeTrue(!emulatingSymlinksOnWindows());
  final Path target = new Path(testBaseDir1(), "file");
  final Path link = new Path(testBaseDir1(), "linkToFile");
  createAndWriteFile(target);
  wrapper.createSymlink(target, link, false);
  try {
    wrapper.createSymlink(target, link, false);
    fail("link already exists");
  } catch (IOException expected) {
    // second createSymlink at the same path must fail
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Two symlinks pointing at each other form a cycle; reading through one
 * must fail — either with FileNotFoundException or with an IOException
 * carrying the exact "Possible cyclic loop..." message (asserted
 * verbatim, so the message text is part of the contract).
 */
@Test(timeout=10000) public void testRecursiveLinks() throws IOException { Path link1=new Path(testBaseDir1() + "/link1"); Path link2=new Path(testBaseDir1() + "/link2"); wrapper.createSymlink(link1,link2,false); wrapper.createSymlink(link2,link1,false); try { readFile(link1); fail("Read recursive link"); } catch ( FileNotFoundException f) { } catch ( IOException x) { assertEquals("Possible cyclic loop while following symbolic link " + link1.toString(),x.getMessage()); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto the file it targets must fail with
 * FileAlreadyExistsException for both plain and OVERWRITE rename,
 * leaving file, link, and link target unchanged. Skipped for the
 * "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToFileItLinksTo() throws IOException { if ("file".equals(getScheme())) { return; } Path file=new Path(testBaseDir1(),"file"); Path link=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(file,link,false); try { wrapper.rename(link,file); fail("Renamed symlink to its target"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } assertTrue(wrapper.isFile(file)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isSymlink(link)); assertEquals(file,wrapper.getLinkTarget(link)); try { wrapper.rename(link,file,Rename.OVERWRITE); fail("Renamed symlink to its target"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } assertTrue(wrapper.isFile(file)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isSymlink(link)); assertEquals(file,wrapper.getLinkTarget(link)); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file onto an existing symlink: without OVERWRITE it must
 * fail with FileAlreadyExistsException; with OVERWRITE the link entry is
 * replaced by the file (the path is then a regular file, not a symlink).
 */
@Test(timeout=10000) public void testRenameFileToSymlinkToFile() throws IOException { Path file1=new Path(testBaseDir1(),"file1"); Path file2=new Path(testBaseDir1(),"file2"); Path link=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file1); createAndWriteFile(file2); wrapper.createSymlink(file2,link,false); try { wrapper.rename(file1,link); fail("Renamed file to symlink w/o overwrite"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } wrapper.rename(file1,link,Rename.OVERWRITE); assertFalse(wrapper.exists(file1)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isFile(link)); assertFalse(wrapper.getFileLinkStatus(link).isSymlink()); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a symlink to itself must fail with
 * FileAlreadyExistsException, both with and without Rename.OVERWRITE.
 */
@Test(timeout=10000)
public void testRenameSymlinkToItself() throws IOException {
  final Path target = new Path(testBaseDir1(), "file");
  createAndWriteFile(target);
  final Path link = new Path(testBaseDir1(), "linkToFile1");
  wrapper.createSymlink(target, link, false);

  try {
    wrapper.rename(link, link);
    fail("Failed to get expected IOException");
  } catch (IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // Behaviour must be identical when OVERWRITE is requested.
  try {
    wrapper.rename(link, link, Rename.OVERWRITE);
    fail("Failed to get expected IOException");
  } catch (IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
}

BranchVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Stat semantics on a link to a file: getFileLinkStatus reports the link
 * itself while isFile/isDir follow it; for non-"file" schemes the
 * resolved stat of the link must equal the stat of the target, and the
 * reported paths must be the qualified file vs. qualified link
 * respectively. Skipped where symlinks are emulated on Windows.
 */
@Test(timeout=10000) public void testStatLinkToFile() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path file=new Path(testBaseDir1() + "/file"); Path linkToFile=new Path(testBaseDir1() + "/linkToFile"); createAndWriteFile(file); wrapper.createSymlink(file,linkToFile,false); assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory()); assertTrue(wrapper.isSymlink(linkToFile)); assertTrue(wrapper.isFile(linkToFile)); assertFalse(wrapper.isDir(linkToFile)); assertEquals(file,wrapper.getLinkTarget(linkToFile)); if (!"file".equals(getScheme())) { assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile)); assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath()); assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath()); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Operating on a directory through a symlinked path: mkdir, listStatus
 * (array and iterator forms, both empty) and delete must all resolve the
 * link — deleting via the link path removes the actual directory too.
 */
@Test(timeout=10000) public void testAccessDirViaSymlink() throws IOException { Path baseDir=new Path(testBaseDir1()); Path dir=new Path(testBaseDir1(),"dir"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path dirViaLink=new Path(linkToDir,"dir"); wrapper.createSymlink(baseDir,linkToDir,false); wrapper.mkdir(dirViaLink,FileContext.DEFAULT_PERM,true); assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory()); FileStatus[] stats=wrapper.listStatus(dirViaLink); assertEquals(0,stats.length); RemoteIterator statsItor=wrapper.listStatusIterator(dirViaLink); assertFalse(statsItor.hasNext()); wrapper.delete(dirViaLink,false); assertFalse(wrapper.exists(dirViaLink)); assertFalse(wrapper.exists(dir)); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a directory onto a symlink that points to a directory must
 * fail even with OVERWRITE; both the source directory and the link must
 * still exist afterwards.
 */
@Test(timeout=10000) public void testRenameDirToSymlinkToDir() throws IOException { Path dir1=new Path(testBaseDir1()); Path subDir=new Path(testBaseDir2(),"subDir"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false); wrapper.createSymlink(subDir,linkToDir,false); try { wrapper.rename(dir1,linkToDir,Rename.OVERWRITE); fail("Renamed directory to a symlink"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } assertTrue(wrapper.exists(dir1)); assertTrue(wrapper.exists(linkToDir)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stat semantics on a link to a directory: getFileStatus resolves the
 * link (reports a directory, not a symlink) while getFileLinkStatus
 * reports the link entry itself; getLinkTarget returns the target dir.
 * The duplicated {@code assertTrue(wrapper.isDir(linkToDir))} check has
 * been collapsed to a single occurrence.
 */
@Test(timeout=10000)
public void testStatLinkToDir() throws IOException {
  Path dir = new Path(testBaseDir1());
  Path linkToDir = new Path(testBaseDir1() + "/linkToDir");
  wrapper.createSymlink(dir, linkToDir, false);

  // Resolved stat: the path behaves as the target directory.
  assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
  assertTrue(wrapper.isDir(linkToDir));
  assertFalse(wrapper.isFile(linkToDir));

  // Unresolved stat: the entry itself is a symlink, not a directory.
  assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
  assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());

  assertEquals(dir, wrapper.getLinkTarget(linkToDir));
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink created with an absolute target keeps that (fully qualified) target
 * even after the directory containing the real file is renamed away: the link's
 * stored target is unchanged and dereferencing it now fails with
 * FileNotFoundException (dangling link).
 */
@Test(timeout=10000) public void testCreateLinkUsingAbsPaths() throws IOException { Path fileAbs=new Path(testBaseDir1() + "/file"); Path linkAbs=new Path(testBaseDir1() + "/linkToFile"); Path schemeAuth=new Path(testURI().toString()); Path fileQual=new Path(schemeAuth,testBaseDir1() + "/file"); createAndWriteFile(fileAbs); wrapper.createSymlink(fileAbs,linkAbs,false); checkLink(linkAbs,fileAbs,fileQual); Path dir1=new Path(testBaseDir1()); Path dir2=new Path(testBaseDir2()); Path linkViaDir2=new Path(testBaseDir2(),"linkToFile"); wrapper.rename(dir1,dir2,Rename.OVERWRITE); assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink()); try { readFile(linkViaDir2); fail("The target should not exist"); } catch ( FileNotFoundException x) { } }

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Create a file through a symlink whose target is a relative path ("dir"), then
 * verify that the file's status via the direct path and via the link path are
 * equal, and that getFileLinkStatus on the (non-link) file equals getFileStatus.
 * Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkRelTarget() throws IOException { assumeTrue(!"file".equals(getScheme())); Path dir=new Path(testBaseDir1(),"dir"); Path file=new Path(dir,"file"); Path linkToDir=new Path(testBaseDir1(),"linkToDir"); Path fileViaLink=new Path(linkToDir,"file"); wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false); wrapper.createSymlink(new Path("dir"),linkToDir,false); createAndWriteFile(fileViaLink); assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(file).getPath()); assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file)); assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink)); assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(file)); }

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Stat of a link with a relative target ("file"): getFileStatus on the link
 * resolves to the target file's status/path, while getFileLinkStatus reports
 * the link's own (qualified) path. Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testStatRelLinkToFile() throws IOException { assumeTrue(!"file".equals(getScheme())); Path file=new Path(testBaseDir1(),"file"); Path linkToFile=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(new Path("file"),linkToFile,false); assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile)); assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath()); assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath()); }

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * Renaming the target of a symlink leaves the link dangling (readFile throws
 * IOException); renaming the target back makes the link readable again.
 * Skipped when symlinks are being emulated on Windows.
 */
@Test(timeout=10000) public void testRenameLinkTarget() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path file=new Path(testBaseDir1(),"file"); Path fileNew=new Path(testBaseDir1(),"fileNew"); Path link=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(file,link,false); wrapper.rename(file,fileNew,Rename.OVERWRITE); try { readFile(link); fail("Link should be dangling"); } catch ( IOException x) { } wrapper.rename(fileNew,file,Rename.OVERWRITE); readFile(link); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file onto a destination reached through a symlink where that
 * destination is a directory must fail with an IOException; the source file
 * must still exist afterwards.
 */
@Test(timeout=10000) public void testRenameFileToDestViaSymlink() throws IOException { Path dir=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path subDir=new Path(linkToDir,"subDir"); createAndWriteFile(file); wrapper.createSymlink(dir,linkToDir,false); wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false); try { wrapper.rename(file,subDir); fail("Renamed file to a directory"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } assertTrue(wrapper.exists(file)); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a file onto a symlink-to-directory: without OVERWRITE it fails with
 * FileAlreadyExistsException; with OVERWRITE the link is replaced by the file
 * (the link path now stats as a plain file, not a symlink).
 */
@Test(timeout=10000) public void testRenameFileToSymlinkToDir() throws IOException { Path file=new Path(testBaseDir1(),"file"); Path subDir=new Path(testBaseDir1(),"subDir"); Path link=new Path(testBaseDir1(),"link"); wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false); wrapper.createSymlink(subDir,link,false); createAndWriteFile(file); try { wrapper.rename(file,link); fail("Renamed file to symlink w/o overwrite"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } wrapper.rename(file,link,Rename.OVERWRITE); assertFalse(wrapper.exists(file)); assertTrue(wrapper.exists(link)); assertTrue(wrapper.isFile(link)); assertFalse(wrapper.getFileLinkStatus(link).isSymlink()); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Renaming a symlink onto an existing directory must fail with an IOException
 * in all three cases tried here: without OVERWRITE, with OVERWRITE on an empty
 * destination dir, and with OVERWRITE once the destination dir is non-empty.
 */
@Test(timeout=10000) public void testRenameSymlinkToExistingDir() throws IOException { Path dir1=new Path(testBaseDir1()); Path dir2=new Path(testBaseDir2()); Path subDir=new Path(testBaseDir2(),"subDir"); Path link=new Path(testBaseDir1(),"linkToDir"); wrapper.createSymlink(dir1,link,false); try { wrapper.rename(link,dir2); fail("Renamed link to a directory"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } try { wrapper.rename(link,dir2,Rename.OVERWRITE); fail("Renamed link to a directory"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } wrapper.mkdir(subDir,FsPermission.getDefault(),false); try { wrapper.rename(link,dir2,Rename.OVERWRITE); fail("Renamed link to a directory"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof IOException); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * mkdir over a path occupied by an existing symlink must fail. Most schemes
 * throw FileAlreadyExistsException; the local "file" scheme is allowed to throw
 * a plain IOException instead (checked by asserting the scheme in that branch).
 */
@Test(timeout=10000) public void testMkdirExistingLink() throws IOException { Path file=new Path(testBaseDir1() + "/targetFile"); createAndWriteFile(file); Path dir=new Path(testBaseDir1() + "/link"); wrapper.createSymlink(file,dir,false); try { wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false); fail("Created a dir where a symlink exists"); } catch ( FileAlreadyExistsException e) { } catch ( IOException e) { assertEquals("file",getScheme()); } }

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto an existing file: without OVERWRITE it fails with
 * FileAlreadyExistsException; with OVERWRITE the destination path becomes the
 * link (still pointing at its original target) — the link-identity checks are
 * skipped when emulating symlinks on Windows.
 */
@Test(timeout=10000) public void testRenameSymlinkToExistingFile() throws IOException { Path file1=new Path(testBaseDir1(),"file"); Path file2=new Path(testBaseDir1(),"someFile"); Path link=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file1); createAndWriteFile(file2); wrapper.createSymlink(file2,link,false); try { wrapper.rename(link,file1); fail("Renamed w/o passing overwrite"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } wrapper.rename(link,file1,Rename.OVERWRITE); assertFalse(wrapper.exists(link)); if (!emulatingSymlinksOnWindows()) { assertTrue(wrapper.getFileLinkStatus(file1).isSymlink()); assertEquals(file2,wrapper.getLinkTarget(file1)); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For non-link paths (a plain file and a directory) getFileLinkStatus must
 * equal getFileStatus, and getLinkTarget must throw an IOException whose
 * message is "Path <qualified path> is not a symbolic link".
 */
@Test(timeout=10000) public void testLinkStatusAndTargetWithNonLink() throws IOException { Path schemeAuth=new Path(testURI().toString()); Path dir=new Path(testBaseDir1()); Path dirQual=new Path(schemeAuth,dir.toString()); Path file=new Path(testBaseDir1(),"file"); Path fileQual=new Path(schemeAuth,file.toString()); createAndWriteFile(file); assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file)); assertEquals(wrapper.getFileStatus(dir),wrapper.getFileLinkStatus(dir)); try { wrapper.getLinkTarget(file); fail("Get link target on non-link should throw an IOException"); } catch ( IOException x) { assertEquals("Path " + fileQual + " is not a symbolic link",x.getMessage()); } try { wrapper.getLinkTarget(dir); fail("Get link target on non-link should throw an IOException"); } catch ( IOException x) { assertEquals("Path " + dirQual + " is not a symbolic link",x.getMessage()); } }

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A link whose target has an authority but no scheme ("//auth/path") stores the
 * target verbatim, but dereferencing it fails: FileContext reports
 * "No AbstractFileSystem for scheme: null", FileSystem reports
 * "No FileSystem for scheme: null". Skipped (early return) for the "file" scheme.
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath2() throws IOException { Path link=new Path(testBaseDir1(),"linkToFile"); Path fileWoScheme=new Path("//" + testURI().getAuthority() + testBaseDir1()+ "/file"); if ("file".equals(getScheme())) { return; } wrapper.createSymlink(fileWoScheme,link,false); assertEquals(fileWoScheme,wrapper.getLinkTarget(link)); assertEquals(fileWoScheme.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString()); try { readFile(link); fail("Accessed a file with w/o scheme"); } catch ( IOException e) { if (wrapper instanceof FileContextTestWrapper) { assertEquals("No AbstractFileSystem for scheme: null",e.getMessage()); } else if (wrapper instanceof FileSystemTestWrapper) { assertEquals("No FileSystem for scheme: null",e.getMessage()); } } }

InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Create a symlink whose target path itself goes through another symlink
 * (linkToFile -> linkToDir/file, linkToDir -> dir1). The chained link must
 * resolve to the real file (readable, correct length) while its stored target
 * remains the via-link path. Skipped when emulating symlinks on Windows.
 */
@Test(timeout=10000) public void testCreateLinkViaLink() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path dir1=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path fileViaLink=new Path(linkToDir,"file"); Path linkToFile=new Path(linkToDir,"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(dir1,linkToDir,false); wrapper.createSymlink(fileViaLink,linkToFile,false); assertTrue(wrapper.isFile(linkToFile)); assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink()); readFile(linkToFile); assertEquals(fileSize,wrapper.getFileStatus(linkToFile).getLen()); assertEquals(fileViaLink,wrapper.getLinkTarget(linkToFile)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Full file lifecycle through an intermediate symlink with an absolute target:
 * create, stat, read, append, rename, and delete via the link, cross-checking
 * statuses, block locations, and checksums against the direct path.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkAbsTarget() throws IOException { Path baseDir=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); Path fileNew=new Path(baseDir,"fileNew"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path fileViaLink=new Path(linkToDir,"file"); Path fileNewViaLink=new Path(linkToDir,"fileNew"); wrapper.createSymlink(baseDir,linkToDir,false); createAndWriteFile(fileViaLink); assertTrue(wrapper.exists(fileViaLink)); assertTrue(wrapper.isFile(fileViaLink)); assertFalse(wrapper.isDir(fileViaLink)); assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink()); assertFalse(wrapper.isDir(fileViaLink)); assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file)); assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink)); readFile(fileViaLink); appendToFile(fileViaLink); wrapper.rename(fileViaLink,fileNewViaLink); assertFalse(wrapper.exists(fileViaLink)); assertTrue(wrapper.exists(fileNewViaLink)); readFile(fileNewViaLink); assertEquals(wrapper.getFileBlockLocations(fileNew,0,1).length,wrapper.getFileBlockLocations(fileNewViaLink,0,1).length); assertEquals(wrapper.getFileChecksum(fileNew),wrapper.getFileChecksum(fileNewViaLink)); wrapper.delete(fileNewViaLink,true); assertFalse(wrapper.exists(fileNewViaLink)); }

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A link whose target has a scheme but no host ("scheme:///path") stores the
 * target verbatim (checked via both qualified and unqualified link paths), but
 * dereferencing fails: FileContext throws a RuntimeException (DFS requires URIs
 * with schemes to have an authority), FileSystem throws FileNotFoundException.
 * Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath1() throws IOException { assumeTrue(!"file".equals(getScheme())); Path schemeAuth=new Path(testURI().toString()); Path fileWoHost=new Path(getScheme() + "://" + testBaseDir1()+ "/file"); Path link=new Path(testBaseDir1() + "/linkToFile"); Path linkQual=new Path(schemeAuth,testBaseDir1() + "/linkToFile"); FSTestWrapper localWrapper=wrapper.getLocalFSWrapper(); wrapper.createSymlink(fileWoHost,link,false); assertEquals(fileWoHost,wrapper.getLinkTarget(linkQual)); assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString()); assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(linkQual).getSymlink().toString()); if (wrapper instanceof FileContextTestWrapper) { assertEquals(fileWoHost.toString(),localWrapper.getFileLinkStatus(linkQual).getSymlink().toString()); } try { readFile(link); fail("DFS requires URIs with schemes have an authority"); } catch ( java.lang.RuntimeException e) { assertTrue(wrapper instanceof FileContextTestWrapper); } catch ( FileNotFoundException e) { assertTrue(wrapper instanceof FileSystemTestWrapper); GenericTestUtils.assertExceptionContains("File does not exist: /test1/file",e); } }

Class: org.apache.hadoop.fs.TestChecksumFileSystem

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A file rewritten through the raw (checksum-bypassing) filesystem leaves the
 * old .crc file in place; reading with checksum verification enabled must then
 * raise a ChecksumException, while disabling verification allows the stale data
 * to be read back.
 */
@Test
public void testCorruptedChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testCorruptChecksum");
  Path checksumPath = localFs.getChecksumFile(testPath);

  // Write a file through the checksummed fs: this creates the .crc file.
  FSDataOutputStream out = localFs.create(testPath, true);
  out.write("testing 1 2 3".getBytes());
  out.close();
  assertTrue(localFs.exists(checksumPath));
  FileStatus stat = localFs.getFileStatus(checksumPath);

  // Rewrite the data through the raw fs, bypassing checksum generation, so the
  // existing .crc file is now stale (unchanged status proves it wasn't touched).
  out = localFs.getRawFileSystem().create(testPath, true);
  out.write("testing stale checksum".getBytes());
  out.close();
  assertTrue(localFs.exists(checksumPath));
  assertEquals(stat, localFs.getFileStatus(checksumPath));

  // With verification on, the stale checksum must be detected.
  // NOTE: the assertion was moved out of a finally block so an unexpected
  // exception from readFile is not masked by the AssertionError, and the
  // message now describes the failure (it is shown only when e is null).
  Exception e = null;
  try {
    localFs.setVerifyChecksum(true);
    readFile(localFs, testPath, 1024);
  } catch (ChecksumException ce) {
    e = ce;
  }
  assertNotNull("did not get expected ChecksumException", e);

  // With verification off, the (stale) contents are readable.
  localFs.setVerifyChecksum(false);
  String str = readFile(localFs, testPath, 1024);
  assertEquals("testing stale checksum", str);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test to ensure that if the checksum file is truncated, a
 * ChecksumException is thrown when reading with verification enabled, and
 * that disabling verification still allows the data to be read.
 */
@Test
public void testTruncatedChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testtruncatedcrc");
  FSDataOutputStream fout = localFs.create(testPath);
  fout.write("testing truncation".getBytes());
  fout.close();

  // Read in the whole checksum file, then rewrite it one byte short.
  Path checksumFile = localFs.getChecksumFile(testPath);
  FileSystem rawFs = localFs.getRawFileSystem();
  FSDataInputStream checksumStream = rawFs.open(checksumFile);
  byte buf[] = new byte[8192];
  int read = checksumStream.read(buf, 0, buf.length);
  checksumStream.close();
  FSDataOutputStream replaceStream = rawFs.create(checksumFile);
  replaceStream.write(buf, 0, read - 1);
  replaceStream.close();

  // Verified read must fail on the truncated crc file.
  try {
    readFile(localFs, testPath, 1024);
    fail("Did not throw a ChecksumException when reading truncated " + "crc file");
  } catch (ChecksumException ignored) {
    // expected
  }

  // Unverified read must succeed and return the original contents.
  // NOTE: switched from assertTrue(..., "...".equals(str)) to assertEquals so a
  // failure reports the actual value; the redundant toString() on the String
  // returned by readFile was dropped.
  localFs.setVerifyChecksum(false);
  String str = readFile(localFs, testPath, 1024);
  assertEquals("read", "testing truncation", str);
}

Class: org.apache.hadoop.fs.TestDFVariations

TestCleaner BooleanVerifier HybridVerifier 
/**
 * Test teardown: make the test root writable again (some tests mark it
 * read-only), delete it fully, and verify it is gone.
 */
@After
public void after() throws IOException {
  // Restore write permission first so the delete cannot fail on a
  // read-only root left behind by a test.
  FileUtil.setWritable(test_root, true);
  FileUtil.fullyDelete(test_root);
  assertTrue(!test_root.exists());
}

Class: org.apache.hadoop.fs.TestEnhancedByteBufferAccess

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Zero-copy reads that request more bytes than one mmap'd segment provides:
 * asking for 8192 (then 4097) returns a 4096-byte buffer whose contents match
 * the corresponding slice of a normally-read copy; read statistics count the
 * bytes as zero-copy reads.
 */
@Test public void testShortZeroCopyReads() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); FSDataInputStream fsIn=null; final int TEST_FILE_LENGTH=12345; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn; ByteBuffer result=dfsIn.read(null,8192,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result)); dfsIn.releaseBuffer(result); result=dfsIn.read(null,4097,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); Assert.assertArrayEquals(Arrays.copyOfRange(original,4096,8192),byteBufferToArray(result)); dfsIn.releaseBuffer(result); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Two phases: (1) with DFS_CLIENT_MMAP_ENABLED=false a zero-copy read must
 * throw UnsupportedOperationException; (2) with mmaps re-enabled but the mmap
 * cache size set to 0, zero-copy reads still work, and a read positioned at
 * EOF returns null. Each phase uses its own client context and cluster.
 */
@Test public void testClientMmapDisable() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,false); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); final int TEST_FILE_LENGTH=16385; final int RANDOM_SEED=23453; final String CONTEXT="testClientMmapDisable"; FSDataInputStream fsIn=null; DistributedFileSystem fs=null; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); fsIn=fs.open(TEST_PATH); try { fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.fail("expected zero-copy read to fail when client mmaps " + "were disabled."); } catch ( UnsupportedOperationException e) { } } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } fsIn=null; fs=null; cluster=null; try { conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,true); conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE,0); conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT + ".1"); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); fsIn=fs.open(TEST_PATH); ByteBuffer buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); fsIn.releaseBuffer(buf); IOUtils.skipFully(fsIn,TEST_FILE_LENGTH - 1); buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(null,buf); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Exercises the short-circuit mmap cache: performs several zero-copy reads,
 * inspects the cache via CountingVisitor/CacheVisitor (the first block's
 * replica must be mmap'd and not evictable while buffers are outstanding),
 * then releases all buffers, closes the stream, and waits up to 60s for the
 * evictable-mmapped set to drain to empty.
 */
@Test public void testZeroCopyMmapCache() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); final int TEST_FILE_LENGTH=16385; final int RANDOM_SEED=23453; final String CONTEXT="testZeroCopyMmapCacheContext"; FSDataInputStream fsIn=null; ByteBuffer results[]={null,null,null,null}; DistributedFileSystem fs=null; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache(); cache.accept(new CountingVisitor(0,5,5,0)); results[0]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); fsIn.seek(0); results[1]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH); cache.accept(new CacheVisitor(){ @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){ ShortCircuitReplica replica=replicas.get(new ExtendedBlockId(firstBlock.getBlockId(),firstBlock.getBlockPoolId())); Assert.assertNotNull(replica); Assert.assertTrue(replica.hasMmap()); Assert.assertNull(replica.getEvictableTimeNs()); } } ); results[2]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); results[3]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); 
cache.accept(new CountingVisitor(3,5,2,0)); for ( ByteBuffer buffer : results) { if (buffer != null) { fsIn.releaseBuffer(buffer); } } fsIn.close(); GenericTestUtils.waitFor(new Supplier(){ public Boolean get(){ final MutableBoolean finished=new MutableBoolean(false); cache.accept(new CacheVisitor(){ @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){ finished.setValue(evictableMmapped.isEmpty()); } } ); return finished.booleanValue(); } } ,10,60000); cache.accept(new CountingVisitor(0,-1,-1,-1)); fs.close(); cluster.shutdown(); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Before the path is cached, a zero-copy read without SKIP_CHECKSUMS must throw
// UnsupportedOperationException; after adding a cache directive and waiting for
// the expected cache usage, the same read succeeds (for two independent streams)
// and the buffers match the seeded file contents. The replica's anchor status is
// then tracked through directive removal and buffer release back to (false,false),
// and cache usage must return to zero.
/** * Test that we can zero-copy read cached data even without disabling * checksums. */ @Test(timeout=120000) public void testZeroCopyReadOfCachedData() throws Exception { BlockReaderTestUtil.enableShortCircuitShmTracing(); BlockReaderTestUtil.enableBlockReaderFactoryTracing(); BlockReaderTestUtil.enableHdfsCachingTracing(); final int TEST_FILE_LENGTH=16385; final Path TEST_PATH=new Path("/a"); final int RANDOM_SEED=23453; HdfsConfiguration conf=initZeroCopyTest(); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,false); final String CONTEXT="testZeroCopyReadOfCachedData"; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,4096)); MiniDFSCluster cluster=null; ByteBuffer result=null, result2=null; cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FsDatasetSpi fsd=cluster.getDataNodes().get(0).getFSDataset(); DistributedFileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); byte original[]=DFSTestUtil.calculateFileContentsFromSeed(RANDOM_SEED,TEST_FILE_LENGTH); FSDataInputStream fsIn=fs.open(TEST_PATH); try { result=fsIn.read(null,TEST_FILE_LENGTH / 2,EnumSet.noneOf(ReadOption.class)); Assert.fail("expected UnsupportedOperationException"); } catch ( UnsupportedOperationException e) { } fs.addCachePool(new CachePoolInfo("pool1")); long directiveId=fs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(TEST_PATH).setReplication((short)1).setPool("pool1").build()); int numBlocks=(int)Math.ceil((double)TEST_FILE_LENGTH / BLOCK_SIZE); DFSTestUtil.verifyExpectedCacheUsage(DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,BLOCK_SIZE),numBlocks,cluster.getDataNodes().get(0).getFSDataset()); try { result=fsIn.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class)); } catch ( 
UnsupportedOperationException e) { Assert.fail("expected to be able to read cached file via zero-copy"); } Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result)); FSDataInputStream fsIn2=fs.open(TEST_PATH); try { result2=fsIn2.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class)); } catch ( UnsupportedOperationException e) { Assert.fail("expected to be able to read cached file via zero-copy"); } Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result2)); fsIn2.releaseBuffer(result2); fsIn2.close(); final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH); final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache(); waitForReplicaAnchorStatus(cache,firstBlock,true,true,1); fs.removeCacheDirective(directiveId); waitForReplicaAnchorStatus(cache,firstBlock,false,true,1); fsIn.releaseBuffer(result); waitForReplicaAnchorStatus(cache,firstBlock,false,false,1); DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd); fsIn.close(); fs.close(); cluster.shutdown(); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Basic zero-copy read: a 4096-byte read with SKIP_CHECKSUMS returns a buffer
 * whose contents match the first 4096 bytes read normally; read statistics
 * report 4096 total and 4096 zero-copy bytes.
 */
@Test public void testZeroCopyReads() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); FSDataInputStream fsIn=null; final int TEST_FILE_LENGTH=12345; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); ByteBuffer result=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn; Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result)); fsIn.releaseBuffer(result); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Zero-copy reads around the 2 GB (Integer.MAX_VALUE) mmap limit. For a file in
 * a single huge block, a read ending at offset 2^31-1 returns a buffer limited
 * to Integer.MAX_VALUE and a further read throws UnsupportedOperationException;
 * for a second file with 256 MB blocks the same boundary is crossed normally.
 * Skipped unless large-file testing is enabled. Buffers are released and the
 * streams/cluster cleaned up in the finally block.
 */
@Test public void test2GBMmapLimit() throws Exception { Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles()); HdfsConfiguration conf=initZeroCopyTest(); final long TEST_FILE_LENGTH=2469605888L; conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,"NULL"); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,TEST_FILE_LENGTH); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); final String CONTEXT="test2GBMmapLimit"; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); FSDataInputStream fsIn=null, fsIn2=null; ByteBuffer buf1=null, buf2=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,0xB); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); fsIn=fs.open(TEST_PATH); buf1=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(1,buf1.remaining()); fsIn.releaseBuffer(buf1); buf1=null; fsIn.seek(2147483640L); buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(7,buf1.remaining()); Assert.assertEquals(Integer.MAX_VALUE,buf1.limit()); fsIn.releaseBuffer(buf1); buf1=null; Assert.assertEquals(2147483647L,fsIn.getPos()); try { buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.fail("expected UnsupportedOperationException"); } catch ( UnsupportedOperationException e) { } fsIn.close(); fsIn=null; final Path TEST_PATH2=new Path("/b"); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,268435456L); DFSTestUtil.createFile(fs,TEST_PATH2,1024 * 1024,TEST_FILE_LENGTH,268435456L,(short)1,0xA); fsIn2=fs.open(TEST_PATH2); fsIn2.seek(2147483640L); buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(8,buf2.remaining()); Assert.assertEquals(2147483648L,fsIn2.getPos()); fsIn2.releaseBuffer(buf2); buf2=null; buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(1024,buf2.remaining()); 
Assert.assertEquals(2147484672L,fsIn2.getPos()); fsIn2.releaseBuffer(buf2); buf2=null; } finally { if (buf1 != null) { fsIn.releaseBuffer(buf1); } if (buf2 != null) { fsIn2.releaseBuffer(buf2); } IOUtils.cleanup(null,fsIn,fsIn2); if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Zero-copy read with no fallback options: a read without SKIP_CHECKSUMS must
 * throw UnsupportedOperationException, while the same read with SKIP_CHECKSUMS
 * returns 4096 bytes matching a normally-read copy, counted in the zero-copy
 * read statistics.
 */
@Test public void testZeroCopyReadsNoFallback() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); FSDataInputStream fsIn=null; final int TEST_FILE_LENGTH=12345; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn; ByteBuffer result; try { result=dfsIn.read(null,4097,EnumSet.noneOf(ReadOption.class)); Assert.fail("expected UnsupportedOperationException"); } catch ( UnsupportedOperationException e) { } result=dfsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result)); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

Class: org.apache.hadoop.fs.TestFiListPath

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Remove the target directory after the getListing RPC: listStatus on the
 * (deleted) path must fail with a FileNotFoundException carrying the expected
 * "does not exist" message.
 */
@Test
public void testTargetDeletionForListStatus() throws Exception {
  LOG.info("Test Target Delete For listStatus");
  try {
    fs.listStatus(TEST_PATH);
    fail("Test should fail with FileNotFoundException");
  } catch (FileNotFoundException e) {
    final String expectedMessage = "File " + TEST_PATH + " does not exist.";
    assertEquals(expectedMessage, e.getMessage());
    LOG.info(StringUtils.stringifyException(e));
  }
}

Class: org.apache.hadoop.fs.TestFiRename

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Rename with OVERWRITE where src and dst are both files: the overwritten dst's
// block and inode must be released (counts drop by one). After a cluster restart
// the existence check is retried up to 5 times (1s apart) — presumably to ride
// out transient post-restart failures such as safe mode; TODO confirm. Finally
// src must be gone and dst present.
/** * Rename test where both src and dst are files */ @Test public void testDeletionOfDstFile() throws Exception { Path src=getTestPath("testDeletionOfDstFile/dir/src"); Path dst=getTestPath("testDeletionOfDstFile/newdir/dst"); createFile(src); createFile(dst); final FSNamesystem namesystem=cluster.getNamesystem(); final long blocks=namesystem.getBlocksTotal(); final long fileCount=namesystem.getFilesTotal(); rename(src,dst,false,false,true,Rename.OVERWRITE); Assert.assertEquals(blocks - 1,namesystem.getBlocksTotal()); Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal()); restartCluster(false); int count=0; boolean exception=true; src=getTestPath("testDeletionOfDstFile/dir/src"); dst=getTestPath("testDeletionOfDstFile/newdir/dst"); while (exception && count < 5) { try { exists(fc,src); exception=false; } catch ( Exception e) { LOG.warn("Exception " + " count " + count + " "+ e.getMessage()); Thread.sleep(1000); count++; } } Assert.assertFalse(exists(fc,src)); Assert.assertTrue(exists(fc,dst)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Rename with OVERWRITE where src and dst are both directories: the overwritten
// dst inode must be released (file count drops by one). After a cluster restart
// the existence check is retried up to 5 times (1s apart) — presumably to ride
// out transient post-restart failures; TODO confirm. Finally src must be gone
// and dst present.
/** * Rename test where both src and dst are directories */ @Test public void testDeletionOfDstDirectory() throws Exception { Path src=getTestPath("testDeletionOfDstDirectory/dir/src"); Path dst=getTestPath("testDeletionOfDstDirectory/newdir/dst"); fc.mkdir(src,FileContext.DEFAULT_PERM,true); fc.mkdir(dst,FileContext.DEFAULT_PERM,true); FSNamesystem namesystem=cluster.getNamesystem(); long fileCount=namesystem.getFilesTotal(); rename(src,dst,false,false,true,Rename.OVERWRITE); Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal()); restartCluster(false); src=getTestPath("testDeletionOfDstDirectory/dir/src"); dst=getTestPath("testDeletionOfDstDirectory/newdir/dst"); int count=0; boolean exception=true; while (exception && count < 5) { try { exists(fc,src); exception=false; } catch ( Exception e) { LOG.warn("Exception " + " count " + count + " "+ e.getMessage()); Thread.sleep(1000); count++; } } Assert.assertFalse(exists(fc,src)); Assert.assertTrue(exists(fc,dst)); }

Class: org.apache.hadoop.fs.TestFileSystemCaching

APIUtilityVerifier UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Verifies FileSystem cache resolution against the configured default URI: scheme-only,
// empty-authority, and matching-authority URIs all resolve to the same cached instance;
// a different authority yields a new instance; and an authority with no scheme at all
// fails with "No FileSystem for scheme: null".
@Test public void testDefaultFsUris() throws Exception { final Configuration conf=new Configuration(); conf.set("fs.defaultfs.impl",DefaultFs.class.getName()); final URI defaultUri=URI.create("defaultfs://host"); FileSystem.setDefaultUri(conf,defaultUri); FileSystem fs=null; final FileSystem defaultFs=FileSystem.get(conf); assertEquals(defaultUri,defaultFs.getUri()); fs=FileSystem.get(URI.create("defaultfs:/"),conf); assertSame(defaultFs,fs); fs=FileSystem.get(URI.create("defaultfs:///"),conf); assertSame(defaultFs,fs); fs=FileSystem.get(URI.create("defaultfs://host"),conf); assertSame(defaultFs,fs); fs=FileSystem.get(URI.create("defaultfs://host2"),conf); assertNotSame(defaultFs,fs); fs=FileSystem.get(URI.create("/"),conf); assertSame(defaultFs,fs); try { fs=FileSystem.get(URI.create("//host"),conf); fail("got fs with auth but no scheme"); } catch ( Exception e) { assertEquals("No FileSystem for scheme: null",e.getMessage()); } try { fs=FileSystem.get(URI.create("//host2"),conf); fail("got fs with auth but no scheme"); } catch ( Exception e) { assertEquals("No FileSystem for scheme: null",e.getMessage()); } }

Class: org.apache.hadoop.fs.TestFileSystemTokens

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
// A parent fs with three children where the credentials already hold a token for one
// child (service2): only the uncovered child (fs1) fetches; the service-less child (fs3)
// and the parent fetch nothing. Ends with two tokens: the fresh one and the pre-seeded mock.
@Test public void testFsWithChildTokensOneExists() throws Exception { Credentials credentials=new Credentials(); Text service1=new Text("singleTokenFs1"); Text service2=new Text("singleTokenFs2"); Token token=mock(Token.class); credentials.addToken(service2,token); MockFileSystem fs1=createFileSystemForServiceName(service1); MockFileSystem fs2=createFileSystemForServiceName(service2); MockFileSystem fs3=createFileSystemForServiceName(null); MockFileSystem multiFs=createFileSystemForServiceName(null,fs1,fs2,fs3); multiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(multiFs,false); verifyTokenFetch(fs1,true); verifyTokenFetch(fs2,false); verifyTokenFetch(fs3,false); assertEquals(2,credentials.numberOfTokens()); assertNotNull(credentials.getToken(service1)); assertSame(token,credentials.getToken(service2)); }

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
// Parent fs whose own service token is already in the credentials: the parent itself does
// not fetch, but both children still do. Ends with three tokens, the parent's being the
// pre-seeded mock instance.
@Test public void testFsWithMyOwnExistsAndChildTokens() throws Exception { Credentials credentials=new Credentials(); Text service1=new Text("singleTokenFs1"); Text service2=new Text("singleTokenFs2"); Text myService=new Text("multiTokenFs"); Token token=mock(Token.class); credentials.addToken(myService,token); MockFileSystem fs1=createFileSystemForServiceName(service1); MockFileSystem fs2=createFileSystemForServiceName(service2); MockFileSystem multiFs=createFileSystemForServiceName(myService,fs1,fs2); multiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(multiFs,false); verifyTokenFetch(fs1,true); verifyTokenFetch(fs2,true); assertEquals(3,credentials.numberOfTokens()); assertSame(token,credentials.getToken(myService)); assertNotNull(credentials.getToken(service1)); assertNotNull(credentials.getToken(service2)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A filesystem that advertises a token service must fetch exactly one
 * delegation token and register it under that service name.
 */
@Test
public void testFsWithToken() throws Exception {
  final Text tokenService = new Text("singleTokenFs");
  final MockFileSystem mockFs = createFileSystemForServiceName(tokenService);
  final Credentials creds = new Credentials();
  mockFs.addDelegationTokens(renewer, creds);
  // The fs should have been asked for (and produced) a token...
  verifyTokenFetch(mockFs, true);
  // ...and exactly that one token should now be stored.
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(tokenService));
}

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * A credential already present for the filesystem's service must suppress a
 * fresh token fetch; the pre-existing token instance remains untouched.
 */
@Test
public void testFsWithTokenExists() throws Exception {
  final Credentials creds = new Credentials();
  final Text tokenService = new Text("singleTokenFs");
  final MockFileSystem mockFs = createFileSystemForServiceName(tokenService);
  final Token preexisting = mock(Token.class);
  creds.addToken(tokenService, preexisting);
  mockFs.addDelegationTokens(renewer, creds);
  // No fetch should occur because the service is already covered.
  verifyTokenFetch(mockFs, false);
  assertEquals(1, creds.numberOfTokens());
  // Same object identity: the seeded token was not replaced.
  assertSame(preexisting, creds.getToken(tokenService));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Duplicate children (the same fs appearing twice, once behind a
 * FilterFileSystem) must be de-duplicated: one fetch, one stored token.
 */
@Test
public void testFsWithDuplicateChildren() throws Exception {
  final Credentials creds = new Credentials();
  final Text tokenService = new Text("singleTokenFs1");
  final MockFileSystem child = createFileSystemForServiceName(tokenService);
  final MockFileSystem parent =
      createFileSystemForServiceName(null, child, new FilterFileSystem(child));
  parent.addDelegationTokens(renewer, creds);
  // The service-less parent fetches nothing; the child fetches exactly once.
  verifyTokenFetch(parent, false);
  verifyTokenFetch(child, true);
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(tokenService));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Parent fs with no service of its own and three children, one service-less: only the
// two children with services fetch tokens; the parent and the service-less child do not.
// Ends with exactly two stored tokens.
@Test public void testFsWithChildTokens() throws Exception { Credentials credentials=new Credentials(); Text service1=new Text("singleTokenFs1"); Text service2=new Text("singleTokenFs2"); MockFileSystem fs1=createFileSystemForServiceName(service1); MockFileSystem fs2=createFileSystemForServiceName(service2); MockFileSystem fs3=createFileSystemForServiceName(null); MockFileSystem multiFs=createFileSystemForServiceName(null,fs1,fs2,fs3); multiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(multiFs,false); verifyTokenFetch(fs1,true); verifyTokenFetch(fs2,true); verifyTokenFetch(fs3,false); assertEquals(2,credentials.numberOfTokens()); assertNotNull(credentials.getToken(service1)); assertNotNull(credentials.getToken(service2)); }

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Duplicate children whose service token is already in the credentials: neither the
// parent nor the child fetches; the single pre-seeded mock token remains (same instance).
@Test public void testFsWithDuplicateChildrenTokenExists() throws Exception { Credentials credentials=new Credentials(); Text service=new Text("singleTokenFs1"); Token token=mock(Token.class); credentials.addToken(service,token); MockFileSystem fs=createFileSystemForServiceName(service); MockFileSystem multiFs=createFileSystemForServiceName(null,fs,new FilterFileSystem(fs)); multiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(multiFs,false); verifyTokenFetch(fs,false); assertEquals(1,credentials.numberOfTokens()); assertSame(token,credentials.getToken(service)); }

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
// Deeply nested tree with duplicates at several levels (same fs repeated, FilterFileSystem
// wrappers, a nested multi-fs) and one service (service2) pre-seeded: fetches happen once
// per uncovered service regardless of how many paths reach it. Final state: four tokens —
// service1, the pre-seeded service2 mock (same instance), multiService, and service4.
@Test public void testFsWithNestedDuplicatesChildren() throws Exception { Credentials credentials=new Credentials(); Text service1=new Text("singleTokenFs1"); Text service2=new Text("singleTokenFs2"); Text service4=new Text("singleTokenFs4"); Text multiService=new Text("multiTokenFs"); Token token2=mock(Token.class); credentials.addToken(service2,token2); MockFileSystem fs1=createFileSystemForServiceName(service1); MockFileSystem fs1B=createFileSystemForServiceName(service1); MockFileSystem fs2=createFileSystemForServiceName(service2); MockFileSystem fs3=createFileSystemForServiceName(null); MockFileSystem fs4=createFileSystemForServiceName(service4); MockFileSystem multiFs=createFileSystemForServiceName(multiService,fs1,fs1B,fs2,fs2,new FilterFileSystem(fs3),new FilterFileSystem(new FilterFileSystem(fs4))); MockFileSystem superMultiFs=createFileSystemForServiceName(null,fs1,fs1B,fs1,new FilterFileSystem(fs3),new FilterFileSystem(multiFs)); superMultiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(superMultiFs,false); verifyTokenFetch(multiFs,true); verifyTokenFetch(fs1,true); verifyTokenFetch(fs2,false); verifyTokenFetch(fs3,false); verifyTokenFetch(fs4,true); assertEquals(4,credentials.numberOfTokens()); assertNotNull(credentials.getToken(service1)); assertNotNull(credentials.getToken(service2)); assertSame(token2,credentials.getToken(service2)); assertNotNull(credentials.getToken(multiService)); assertNotNull(credentials.getToken(service4)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Parent fs with its own service plus two children, where one child's service (service2)
// is pre-seeded: the parent and the uncovered child fetch, the covered child does not.
// Ends with three tokens in total.
@Test public void testFsWithMyOwnAndChildTokens() throws Exception { Credentials credentials=new Credentials(); Text service1=new Text("singleTokenFs1"); Text service2=new Text("singleTokenFs2"); Text myService=new Text("multiTokenFs"); Token token=mock(Token.class); credentials.addToken(service2,token); MockFileSystem fs1=createFileSystemForServiceName(service1); MockFileSystem fs2=createFileSystemForServiceName(service2); MockFileSystem multiFs=createFileSystemForServiceName(myService,fs1,fs2); multiFs.addDelegationTokens(renewer,credentials); verifyTokenFetch(multiFs,true); verifyTokenFetch(fs1,true); verifyTokenFetch(fs2,false); assertEquals(3,credentials.numberOfTokens()); assertNotNull(credentials.getToken(myService)); assertNotNull(credentials.getToken(service1)); assertNotNull(credentials.getToken(service2)); }

Class: org.apache.hadoop.fs.TestFileUtil

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// FileUtil.list(): returns entry names for an existing directory (2 pre-existing files in
// "partitioned", 0 in a fresh dir) and throws IOException for a non-existent directory.
// The empty catch block is the expected-exception path.
@Test(timeout=30000) public void testListAPI() throws IOException { setupDirs(); String[] files=FileUtil.list(partitioned); Assert.assertEquals("Unexpected number of pre-existing files",2,files.length); File newDir=new File(tmp.getPath(),"test"); newDir.mkdir(); Assert.assertTrue("Failed to create test dir",newDir.exists()); files=FileUtil.list(newDir); Assert.assertEquals("New directory unexpectedly contains files",0,files.length); newDir.delete(); Assert.assertFalse("Failed to delete test dir",newDir.exists()); try { files=FileUtil.list(newDir); Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString()); } catch ( IOException ioe) { } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Deleting a symlink removes only the link, never its target: the entry count in "del"
// drops from 5 to 4 to 3 while validateTmpDir() confirms the link targets survive.
/** * Tests if fullyDelete deletes * (a) symlink to file only and not the file pointed to by symlink. * (b) symlink to dir only and not the dir pointed to by symlink. * @throws IOException */ @Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException { setupDirs(); File link=new File(del,LINK); Assert.assertEquals(5,del.list().length); boolean ret=FileUtil.fullyDelete(link); Assert.assertTrue(ret); Assert.assertFalse(link.exists()); Assert.assertEquals(4,del.list().length); validateTmpDir(); File linkDir=new File(del,"tmpDir"); ret=FileUtil.fullyDelete(linkDir); Assert.assertTrue(ret); Assert.assertFalse(linkDir.exists()); Assert.assertEquals(3,del.list().length); validateTmpDir(); }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies FileUtil.unZip(): a single-entry zip extracts into a target
 * directory with the expected name and length, and extraction into a
 * destination that is a regular file (not a directory) fails with an
 * IOException.
 */
@Test(timeout=30000)
public void testUnZip() throws IOException {
  setupDirs();
  // Build a one-entry ("foo") zip holding the 12-byte payload.
  final File simpleZip = new File(del, FILE);
  OutputStream os = new FileOutputStream(simpleZip);
  ZipOutputStream tos = new ZipOutputStream(os);
  try {
    ZipEntry ze = new ZipEntry("foo");
    byte[] data = "some-content".getBytes("UTF-8");
    ze.setSize(data.length);
    tos.putNextEntry(ze);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  } finally {
    tos.close();
  }
  FileUtil.unZip(simpleZip, tmp);
  assertTrue(new File(tmp, "foo").exists());
  assertEquals(12, new File(tmp, "foo").length());
  // A destination that exists as a plain file must be rejected.
  final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unZip(simpleZip, regularFile);
    // Was assertTrue("An IOException expected.", false) — fail() is the idiom.
    fail("An IOException expected.");
  } catch (IOException ioe) {
    // expected
  }
}

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// copyMerge over the "partitioned" directory: the merged output file must exist and
// contain exactly "foo" then "bar", one per line, with nothing after them.
@Test(timeout=30000) public void testCopyMergeSingleDirectory() throws IOException { setupDirs(); boolean copyMergeResult=copyMerge("partitioned","tmp/merged"); Assert.assertTrue("Expected successful copyMerge result.",copyMergeResult); File merged=new File(TEST_DIR,"tmp/merged"); Assert.assertTrue("File tmp/merged must exist after copyMerge.",merged.exists()); BufferedReader rdr=new BufferedReader(new FileReader(merged)); try { Assert.assertEquals("Line 1 of merged file must contain \"foo\".","foo",rdr.readLine()); Assert.assertEquals("Line 2 of merged file must contain \"bar\".","bar",rdr.readLine()); Assert.assertNull("Expected end of file reading merged file.",rdr.readLine()); } finally { rdr.close(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies FileUtil.stat2Paths(FileStatus[], Path): a null status array with
 * a default yields just the default; null with a null default yields a
 * one-element array holding null; a populated array yields the statuses'
 * paths in order, ignoring the default.
 */
@Test(timeout=30000)
public void testStat2Paths2() {
  Path defaultPath = new Path("file://default");
  Path[] paths = FileUtil.stat2Paths(null, defaultPath);
  assertEquals(1, paths.length);
  assertEquals(defaultPath, paths[0]);
  paths = FileUtil.stat2Paths(null, null);
  assertNotNull(paths); // was assertTrue(paths != null)
  assertEquals(1, paths.length);
  assertNull(paths[0]); // was assertEquals(null, paths[0])
  Path path1 = new Path("file://foo");
  Path path2 = new Path("file://moo");
  FileStatus[] fileStatuses = new FileStatus[]{
      new FileStatus(3, false, 0, 0, 0, path1),
      new FileStatus(3, false, 0, 0, 0, path2)};
  paths = FileUtil.stat2Paths(fileStatuses, defaultPath);
  assertEquals(2, paths.length);
  // Expected value first, actual second (the original had them swapped,
  // which garbles the failure message).
  assertEquals(path1, paths[0]);
  assertEquals(path2, paths[1]);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a file, symlinks it with FileUtil.symLink(), and verifies the link
 * reports the target's length both via File.length() and by counting the
 * bytes readable through the link.
 */
@Test(timeout=30000)
public void testSymlink() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  byte[] data = "testSymLink".getBytes();
  File file = new File(del, FILE);
  File link = new File(del, "_link");
  // Write the payload to the link target.
  FileOutputStream os = new FileOutputStream(file);
  os.write(data);
  os.close();
  FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
  Assert.assertEquals(data.length, file.length());
  Assert.assertEquals(data.length, link.length());
  // Count bytes readable through the link. InputStream.read() signals EOF
  // with -1, so the loop condition must be >= 0; the original "> 0" would
  // have stopped early on any NUL byte in the data.
  FileInputStream in = new FileInputStream(link);
  long len = 0;
  while (in.read() >= 0) {
    len++;
  }
  in.close();
  Assert.assertEquals(data.length, len);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * fullyDeleteContents() must empty the "del" directory while leaving the
 * directory itself (and the symlink targets under tmp) intact.
 */
@Test(timeout=30000)
public void testFullyDeleteContents() throws IOException {
  setupDirs();
  final boolean emptied = FileUtil.fullyDeleteContents(del);
  Assert.assertTrue(emptied);
  // The directory survives but holds nothing.
  Assert.assertTrue(del.exists());
  Assert.assertEquals(0, del.listFiles().length);
  validateTmpDir();
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies FileUtil.stat2Paths(FileStatus[]): null in, null out; empty array
 * in, empty array out; a populated array yields the statuses' paths in order.
 */
@Test(timeout=30000)
public void testStat2Paths1() {
  assertNull(FileUtil.stat2Paths(null));
  FileStatus[] fileStatuses = new FileStatus[0];
  Path[] paths = FileUtil.stat2Paths(fileStatuses);
  assertEquals(0, paths.length);
  Path path1 = new Path("file://foo");
  Path path2 = new Path("file://moo");
  fileStatuses = new FileStatus[]{
      new FileStatus(3, false, 0, 0, 0, path1),
      new FileStatus(3, false, 0, 0, 0, path2)};
  paths = FileUtil.stat2Paths(fileStatuses);
  assertEquals(2, paths.length);
  // Expected value first, actual second (original arguments were swapped,
  // which garbles the failure message).
  assertEquals(path1, paths[0]);
  assertEquals(path2, paths[1]);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// FileUtil.copy(FileSystem, Path, File, deleteSource, conf): copy without delete keeps
// the source, copy with delete removes it, and copying a directory with delete recreates
// its two files (3 bytes + line separator each) and removes the source tree. Lengths
// account for the platform line separator appended by createFile — TODO confirm against
// the createFile helper.
@Test(timeout=30000) public void testCopy5() throws IOException { setupDirs(); URI uri=tmp.toURI(); Configuration conf=new Configuration(); FileSystem fs=FileSystem.newInstance(uri,conf); final String content="some-content"; File srcFile=createFile(tmp,"src",content); Path srcPath=new Path(srcFile.toURI()); final File dest=new File(del,"dest"); boolean result=FileUtil.copy(fs,srcPath,dest,false,conf); assertTrue(result); assertTrue(dest.exists()); assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length,dest.length()); assertTrue(srcFile.exists()); dest.delete(); assertTrue(!dest.exists()); result=FileUtil.copy(fs,srcPath,dest,true,conf); assertTrue(result); assertTrue(dest.exists()); assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length,dest.length()); assertTrue(!srcFile.exists()); dest.delete(); assertTrue(!dest.exists()); srcPath=new Path(partitioned.toURI()); result=FileUtil.copy(fs,srcPath,dest,true,conf); assertTrue(result); assertTrue(dest.exists() && dest.isDirectory()); File[] files=dest.listFiles(); assertTrue(files != null); assertEquals(2,files.length); for ( File f : files) { assertEquals(3 + System.getProperty("line.separator").getBytes().length,f.length()); } assertTrue(!partitioned.exists()); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Deletes tmp first so the links under "del" dangle, then verifies fullyDelete still
// removes each dangling link (entry count 5 -> 4 -> 3) without error.
/** * Tests if fullyDelete deletes * (a) dangling symlink to file properly * (b) dangling symlink to directory properly * @throws IOException */ @Test(timeout=30000) public void testFullyDeleteDanglingSymlinks() throws IOException { setupDirs(); boolean ret=FileUtil.fullyDelete(tmp); Assert.assertTrue(ret); Assert.assertFalse(tmp.exists()); File link=new File(del,LINK); Assert.assertEquals(5,del.list().length); ret=FileUtil.fullyDelete(link); Assert.assertTrue(ret); Assert.assertEquals(4,del.list().length); File linkDir=new File(del,"tmpDir"); ret=FileUtil.fullyDelete(linkDir); Assert.assertTrue(ret); Assert.assertEquals(3,del.list().length); }

BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Link length tracks its target: equal to the target's length while it exists, and 0 once
// the target is deleted — except on Windows before Java 7, where the platform reports the
// stale length (hence the Shell.WINDOWS branch).
/** * Test that length on a symlink works as expected. */ @Test(timeout=30000) public void testSymlinkLength() throws Exception { Assert.assertFalse(del.exists()); del.mkdirs(); byte[] data="testSymLinkData".getBytes(); File file=new File(del,FILE); File link=new File(del,"_link"); FileOutputStream os=new FileOutputStream(file); os.write(data); os.close(); Assert.assertEquals(0,link.length()); FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath()); Assert.assertEquals(data.length,file.length()); Assert.assertEquals(data.length,link.length()); file.delete(); Assert.assertFalse(file.exists()); if (Shell.WINDOWS && !Shell.isJava7OrAbove()) { Assert.assertEquals(data.length,link.length()); } else { Assert.assertEquals(0,link.length()); } link.delete(); Assert.assertFalse(link.exists()); }

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// getDU(): sums file sizes only (2 files of 3 bytes + line separator), returns 0 for a
// non-existent path, returns the file size for a plain file, tolerates an unreadable file
// inside a readable dir, and returns 0 for an unreadable dir. Permissions are restored in
// the finally block so later tests are unaffected.
/** * Test that getDU is able to handle cycles caused due to symbolic links * and that directory sizes are not added to the final calculated size * @throws IOException */ @Test(timeout=30000) public void testGetDU() throws Exception { setupDirs(); long du=FileUtil.getDU(TEST_DIR); final long expected=2 * (3 + System.getProperty("line.separator").length()); Assert.assertEquals(expected,du); final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog"); long duDoesNotExist=FileUtil.getDU(doesNotExist); assertEquals(0,duDoesNotExist); File notADirectory=new File(partitioned,"part-r-00000"); long duNotADirectoryActual=FileUtil.getDU(notADirectory); long duNotADirectoryExpected=3 + System.getProperty("line.separator").length(); assertEquals(duNotADirectoryExpected,duNotADirectoryActual); try { try { FileUtil.chmod(notADirectory.getAbsolutePath(),"0000"); } catch ( InterruptedException ie) { assertNull(ie); } assertFalse(FileUtil.canRead(notADirectory)); final long du3=FileUtil.getDU(partitioned); assertEquals(expected,du3); try { FileUtil.chmod(partitioned.getAbsolutePath(),"0000"); } catch ( InterruptedException ie) { assertNull(ie); } assertFalse(FileUtil.canRead(partitioned)); final long du4=FileUtil.getDU(partitioned); assertEquals(0,du4); } finally { FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true); } }

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// FileUtil.listFiles(): File[] variant of testListAPI — 2 entries in "partitioned", 0 in
// a fresh dir, and IOException (expected, hence the empty catch) for a missing dir.
@Test(timeout=30000) public void testListFiles() throws IOException { setupDirs(); File[] files=FileUtil.listFiles(partitioned); Assert.assertEquals(2,files.length); File newDir=new File(tmp.getPath(),"test"); newDir.mkdir(); Assert.assertTrue("Failed to create test dir",newDir.exists()); files=FileUtil.listFiles(newDir); Assert.assertEquals(0,files.length); newDir.delete(); Assert.assertFalse("Failed to delete test dir",newDir.exists()); try { files=FileUtil.listFiles(newDir); Assert.fail("IOException expected on listFiles() for non-existent dir " + newDir.toString()); } catch ( IOException ioe) { } }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies FileUtil.unTar(): a single-entry tar extracts into a target
 * directory with the expected name and length, and extraction into a
 * destination that is a regular file (not a directory) fails with an
 * IOException.
 */
@Test(timeout=30000)
public void testUnTar() throws IOException {
  setupDirs();
  // Build a one-entry ("foo") tar holding the 12-byte payload.
  final File simpleTar = new File(del, FILE);
  OutputStream os = new FileOutputStream(simpleTar);
  TarOutputStream tos = new TarOutputStream(os);
  try {
    TarEntry te = new TarEntry("foo");
    byte[] data = "some-content".getBytes("UTF-8");
    te.setSize(data.length);
    tos.putNextEntry(te);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  } finally {
    tos.close();
  }
  FileUtil.unTar(simpleTar, tmp);
  assertTrue(new File(tmp, "foo").exists());
  assertEquals(12, new File(tmp, "foo").length());
  // A destination that exists as a plain file must be rejected.
  final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unTar(simpleTar, regularFile);
    // Was assertTrue("An IOException expected.", false) — fail() is the idiom.
    fail("An IOException expected.");
  } catch (IOException ioe) {
    // expected
  }
}

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// createJarWithClassPath(): builds a manifest-only jar whose Class-Path attribute expands
// a wildcard entry ("<dir>/*") to the matching *.jar/*.JAR files, keeps plain entries
// as-is, preserves the trailing separator on a non-existent subdirectory entry, and drops
// empty classpath elements. Expected and actual entries are compared order-insensitively
// (both sorted) as URL external forms.
@Test(timeout=30000) public void testCreateJarWithClassPath() throws Exception { Assert.assertFalse(tmp.exists()); Assert.assertTrue(tmp.mkdirs()); List wildcardMatches=Arrays.asList(new File(tmp,"wildcard1.jar"),new File(tmp,"wildcard2.jar"),new File(tmp,"wildcard3.JAR"),new File(tmp,"wildcard4.JAR")); for ( File wildcardMatch : wildcardMatches) { Assert.assertTrue("failure creating file: " + wildcardMatch,wildcardMatch.createNewFile()); } Assert.assertTrue(new File(tmp,"text.txt").createNewFile()); Assert.assertTrue(new File(tmp,"executable.exe").createNewFile()); Assert.assertTrue(new File(tmp,"README").createNewFile()); String wildcardPath=tmp.getCanonicalPath() + File.separator + "*"; String nonExistentSubdir=tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"+ Path.SEPARATOR; List classPaths=Arrays.asList("","cp1.jar","cp2.jar",wildcardPath,"cp3.jar",nonExistentSubdir); String inputClassPath=StringUtils.join(File.pathSeparator,classPaths); String classPathJar=FileUtil.createJarWithClassPath(inputClassPath,new Path(tmp.getCanonicalPath()),System.getenv()); JarFile jarFile=null; try { jarFile=new JarFile(classPathJar); Manifest jarManifest=jarFile.getManifest(); Assert.assertNotNull(jarManifest); Attributes mainAttributes=jarManifest.getMainAttributes(); Assert.assertNotNull(mainAttributes); Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH)); String classPathAttr=mainAttributes.getValue(Attributes.Name.CLASS_PATH); Assert.assertNotNull(classPathAttr); List expectedClassPaths=new ArrayList(); for ( String classPath : classPaths) { if (classPath.length() == 0) { continue; } if (wildcardPath.equals(classPath)) { for ( File wildcardMatch : wildcardMatches) { expectedClassPaths.add(wildcardMatch.toURI().toURL().toExternalForm()); } } else { File fileCp=null; if (!new Path(classPath).isAbsolute()) { fileCp=new File(tmp,classPath); } else { fileCp=new File(classPath); } if (nonExistentSubdir.equals(classPath)) { 
expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm() + Path.SEPARATOR); } else { expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm()); } } } List actualClassPaths=Arrays.asList(classPathAttr.split(" ")); Collections.sort(expectedClassPaths); Collections.sort(actualClassPaths); Assert.assertEquals(expectedClassPaths,actualClassPaths); } finally { if (jarFile != null) { try { jarFile.close(); } catch ( IOException e) { LOG.warn("exception closing jarFile: " + classPathJar,e); } } } }

Class: org.apache.hadoop.fs.TestFsShellCopy

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// -put destination semantics: a trailing "/" or "/." means "treat dst as a directory" —
// it fails (exit 1) when dst does not exist, and copies into dst when it does; "foo/.."
// resolves to the parent directory. Plain dst with no suffix creates a file.
@Test public void testRepresentsDir() throws Exception { Path subdirDstPath=new Path(dstPath,srcPath.getName()); String argv[]=null; lfs.delete(dstPath,true); assertFalse(lfs.exists(dstPath)); argv=new String[]{"-put",srcPath.toString(),dstPath.toString()}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath)); lfs.delete(dstPath,true); assertFalse(lfs.exists(dstPath)); lfs.delete(dstPath,true); for ( String suffix : new String[]{"/","/."}) { argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix}; assertEquals(1,shell.run(argv)); assertFalse(lfs.exists(dstPath)); assertFalse(lfs.exists(subdirDstPath)); } for ( String suffix : new String[]{"/","/."}) { lfs.delete(dstPath,true); lfs.mkdirs(dstPath); argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); assertTrue(lfs.isFile(subdirDstPath)); } String dotdotDst=dstPath + "/foo/.."; lfs.delete(dstPath,true); lfs.mkdirs(new Path(dstPath,"foo")); argv=new String[]{"-put",srcPath.toString(),dotdotDst}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); assertTrue(lfs.isFile(subdirDstPath)); }

InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
// Windows-only (assumeTrue skips elsewhere): -moveFromLocal must accept a native Windows
// absolute path (backslashes/drive letter) as the source; the source disappears and the
// target exists as a file.
@Test public void testMoveFromWindowsLocalPath() throws Exception { assumeTrue(Path.WINDOWS); Path testRoot=new Path(testRootDir,"testPutFile"); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); Path target=new Path(testRoot,"target"); Path srcFile=new Path(testRoot,new Path("srcFile")); lfs.createNewFile(srcFile); String winSrcFile=(new File(srcFile.toUri().getPath().toString())).getAbsolutePath(); shellRun(0,"-moveFromLocal",winSrcFile,target.toString()); assertFalse(lfs.exists(srcFile)); assertTrue(lfs.exists(target)); assertTrue(lfs.isFile(target)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests -getmerge over single files, multiple files, a glob, and
 * directories, with and without -nl (newline after each source).
 *
 * Fixes a latent bug: the last four shell.run() results were discarded, so
 * their assertEquals(0, exit) calls re-checked a stale exit value instead of
 * the run just performed.
 */
@Test
public void testCopyMerge() throws Exception {
  Path root = new Path(testRootDir, "TestMerge");
  Path f1 = new Path(root, "f1");
  Path f2 = new Path(root, "f2");
  Path f3 = new Path(root, "f3");
  Path fnf = new Path(root, "fnf"); // intentionally never created
  Path d = new Path(root, "dir");
  Path df1 = new Path(d, "df1");
  Path df2 = new Path(d, "df2");
  Path df3 = new Path(d, "df3");
  createFile(f1, f2, f3, df1, df2, df3);
  int exit;
  // Single file.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1", readFile("out"));
  // Missing source: non-zero exit and no output file created.
  exit = shell.run(new String[]{"-getmerge", fnf.toString(), "out"});
  assertEquals(1, exit);
  assertFalse(lfs.exists(new Path("out")));
  // Two files, in both orders.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1f2", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", f2.toString(), f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f2f1", readFile("out"));
  // -nl appends a newline after each merged source.
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\n", readFile("out"));
  // Glob source (exit was previously not captured from here on — fixed).
  exit = shell.run(new String[]{"-getmerge", "-nl", new Path(root, "f*").toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  // Directory source merges its immediate files; the "dir" subtree is not
  // included when merging root.
  exit = shell.run(new String[]{"-getmerge", "-nl", root.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", "-nl", d.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("df1\ndf2\ndf3\n", readFile("out"));
  // Mixed files and a directory, merged in argument order.
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), d.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\ndf1\ndf2\ndf3\nf2\n", readFile("out"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * -moveFromLocal on a directory: the source directory is removed and the
 * target appears in its place.
 */
@Test
public void testMoveDirFromLocal() throws Exception {
  final Path root = new Path(testRootDir, "testPutDir");
  // Start from a clean root containing only the source directory.
  lfs.delete(root, true);
  lfs.mkdirs(root);
  final Path source = new Path(root, "srcDir");
  lfs.mkdirs(source);
  final Path destination = new Path(root, "target");
  final int rc = shell.run(
      new String[]{"-moveFromLocal", source.toString(), destination.toString()});
  assertEquals(0, rc);
  assertFalse(lfs.exists(source));
  assertTrue(lfs.exists(destination));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * -moveFromLocal on a file: the source file is removed and the target exists
 * as a regular file.
 */
@Test
public void testMoveFileFromLocal() throws Exception {
  final Path root = new Path(testRootDir, "testPutFile");
  // Start from a clean root containing only the source file.
  lfs.delete(root, true);
  lfs.mkdirs(root);
  final Path destination = new Path(root, "target");
  final Path source = new Path(root, new Path("srcFile"));
  lfs.createNewFile(source);
  final int rc = shell.run(
      new String[]{"-moveFromLocal", source.toString(), destination.toString()});
  assertEquals(0, rc);
  assertFalse(lfs.exists(source));
  assertTrue(lfs.exists(destination));
  assertTrue(lfs.isFile(destination));
}

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
// Per-test fixture: enables checksum verify/write, clears src/dst, and creates srcPath
// with "hi" — asserting the checksum side file exists so checksum-dependent tests start
// from a known state.
@Before public void prepFiles() throws Exception { lfs.setVerifyChecksum(true); lfs.setWriteChecksum(true); lfs.delete(srcPath,true); lfs.delete(dstPath,true); FSDataOutputStream out=lfs.create(srcPath); out.writeChars("hi"); out.close(); assertTrue(lfs.exists(lfs.getChecksumFile(srcPath))); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// -moveFromLocal into an existing directory moves the source underneath it (exit 0);
// repeating the move when targetDir/srcDir already exists fails (exit 1) and leaves the
// source in place.
@Test public void testMoveDirFromLocalDestExists() throws Exception { Path testRoot=new Path(testRootDir,"testPutDir"); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); Path srcDir=new Path(testRoot,"srcDir"); lfs.mkdirs(srcDir); Path targetDir=new Path(testRoot,"target"); lfs.mkdirs(targetDir); int exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()}); assertEquals(0,exit); assertFalse(lfs.exists(srcDir)); assertTrue(lfs.exists(new Path(targetDir,srcDir.getName()))); lfs.mkdirs(srcDir); exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()}); assertEquals(1,exit); assertTrue(lfs.exists(srcDir)); }

Class: org.apache.hadoop.fs.TestFsShellReturnCode

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "rm -f" on a glob with no matches must succeed silently: exit 0 and nothing written to
// stderr (captured by temporarily swapping System.err; restored in finally).
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream err=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(err); try { int exit=shell.run(new String[]{"-rm","-f","nomatch*"}); assertEquals(0,exit); assertTrue(bytes.toString().isEmpty()); } finally { IOUtils.closeStream(err); System.setErr(oldErr); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "get" with a non-existent source must exit 1 and print the intended "No such file or
// directory" message on stderr rather than "get: null" (stderr captured via System.setErr
// and restored in finally).
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream out=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(out); final String results; try { Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy"); fileSys.delete(tdir,true); fileSys.mkdirs(tdir); String[] args=new String[3]; args[0]="-get"; args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString(); args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString(); assertTrue("file exists",!fileSys.exists(new Path(args[1]))); assertTrue("file exists",!fileSys.exists(new Path(args[2]))); int run=shell.run(args); results=bytes.toString(); assertEquals("Return code should be 1",1,run); assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null")); assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory")); } finally { IOUtils.closeStream(out); System.setErr(oldErr); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "rm" (without -f) on a glob with no matches must fail: exit 1 and a "No such file or
// directory" message on the captured stderr; System.err is restored in finally.
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream err=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(err); final String results; try { int exit=shell.run(new String[]{"-rm","nomatch*"}); assertEquals(1,exit); results=bytes.toString(); assertTrue(results.contains("rm: `nomatch*': No such file or directory")); } finally { IOUtils.closeStream(err); System.setErr(oldErr); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// chmod exit codes: 0 for an existing file, 1 for a missing file, 1 for a glob matching
// nothing, and 0 for a glob matching existing files (file1..file3 via "file*").
/** * Test Chmod 1. Create and write file on FS 2. Verify that exit code for * chmod on existing file is 0 3. Verify that exit code for chmod on * non-existing file is 1 4. Verify that exit code for chmod with glob input * on non-existing file is 1 5. Verify that exit code for chmod with glob * input on existing file in 0 * @throws Exception */ @Test(timeout=30000) public void testChmod() throws Exception { Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists"); final String f1=p1.toUri().getPath(); final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath(); final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath(); final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1"); final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2"); final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3"); final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath(); writeFile(fileSys,p1); assertTrue(fileSys.exists(p1)); String argv[]={"-chmod","777",f1}; assertEquals(0,fsShell.run(argv)); String argv2[]={"-chmod","777",f2}; assertEquals(1,fsShell.run(argv2)); String argv3[]={"-chmod","777",f3}; assertEquals(1,fsShell.run(argv3)); writeFile(fileSys,p4); assertTrue(fileSys.exists(p4)); writeFile(fileSys,p5); assertTrue(fileSys.exists(p5)); writeFile(fileSys,p6); assertTrue(fileSys.exists(p6)); String argv4[]={"-chmod","777",f7}; assertEquals(0,fsShell.run(argv4)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Runs the custom -testInterrupt command and checks the shell maps an
// interrupt to exit code 130 (128 + SIGINT). InterruptCommand.processed is a
// static counter, so the second assertion (2) depends on the first run having
// incremented it to 1 — the two shell.run calls are order-dependent.
@Test(timeout=30000) public void testInterrupt() throws Exception { MyFsShell shell=new MyFsShell(); shell.setConf(new Configuration()); final Path d=new Path(TEST_ROOT_DIR,"testInterrupt"); final Path f1=new Path(d,"f1"); final Path f2=new Path(d,"f2"); assertTrue(fileSys.mkdirs(d)); writeFile(fileSys,f1); assertTrue(fileSys.isFile(f1)); writeFile(fileSys,f2); assertTrue(fileSys.isFile(f2)); int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()}); assertEquals(1,InterruptCommand.processed); assertEquals(130,exitCode); exitCode=shell.run(new String[]{"-testInterrupt",d.toString()}); assertEquals(2,InterruptCommand.processed); assertEquals(130,exitCode); }

Class: org.apache.hadoop.fs.TestGlobPaths

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Glob curly-brace alternation: simple {a,b}, nested {ab{c,d},jh}, alternation
// across path components {a/b,c/d}, and literal '}' adjacent to a group.
// Ends by asserting an unbalanced "}{bc" pattern raises "Illegal file pattern:".
// NOTE(review): assertEquals calls here are (actual, expected) — reversed from
// the JUnit convention — so failure messages will read backwards.
@Test public void pTestCurlyBracket() throws IOException { Path[] matchedPath; String[] files; try { files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"}; matchedPath=prepareTesting(USER_DIR + "/a.{abc,jh}??",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abdxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"}; matchedPath=prepareTesting(USER_DIR + "/a.{ab{c,d},jh}??",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); assertEquals(matchedPath[2],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/a/b",USER_DIR + "/a/d",USER_DIR + "/c/b",USER_DIR + "/c/d"}; matchedPath=prepareTesting(USER_DIR + "/{a/b,c/d}",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{"/a/b","/a/d","/c/b","/c/d"}; matchedPath=prepareTesting("{/a/b,/c/d}",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/}bc",USER_DIR + "/}c"}; matchedPath=prepareTesting(USER_DIR + "/}{a,b}c",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{b}c",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{}bc",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{,}bc",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{b,}c",files); assertEquals(matchedPath.length,2); 
assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); matchedPath=prepareTesting(USER_DIR + "/}{,b}c",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); matchedPath=prepareTesting(USER_DIR + "/}{ac,?}",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[1]); boolean hasException=false; try { prepareTesting(USER_DIR + "}{bc",files); } catch ( IOException e) { assertTrue(e.getMessage().startsWith("Illegal file pattern:")); hasException=true; } assertTrue(hasException); } finally { cleanupDFS(); } }

APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Glob matching of a backslash-escaped special character ('[') in a file
 * name. Skipped on Windows, where backslash is the path separator.
 */
@Test
public void pTestEscape() throws IOException {
  org.junit.Assume.assumeTrue(!Path.WINDOWS);
  try {
    // The escaped name is used both as the file to create and as the pattern.
    final String escapedName = USER_DIR + "/ab\\[c.d";
    final String[] files = new String[]{escapedName};
    final Path[] matchedPath = prepareTesting(escapedName, files);
    assertEquals(matchedPath.length, 1);
    assertEquals(matchedPath[0], path[0]);
  } finally {
    cleanupDFS();
  }
}

Class: org.apache.hadoop.fs.TestHarFileSystemBasics

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Lists dir1 inside the bundled /test.har archive resource and checks exactly
// {1.txt, 2.txt} come back; names are removed from the expected set as seen,
// so the final size-0 assertion also catches duplicates/missing entries.
@Test public void testListLocatedStatus() throws Exception { String testHarPath=this.getClass().getResource("/test.har").getPath(); URI uri=new URI("har://" + testHarPath); HarFileSystem hfs=new HarFileSystem(localFileSystem); hfs.initialize(uri,new Configuration()); Set expectedFileNames=new HashSet(); expectedFileNames.add("1.txt"); expectedFileNames.add("2.txt"); Path path=new Path("dir1"); RemoteIterator fileList=hfs.listLocatedStatus(path); while (fileList.hasNext()) { String fileName=fileList.next().getPath().getName(); assertTrue(fileName + " not in expected files list",expectedFileNames.contains(fileName)); expectedFileNames.remove(fileName); } assertEquals("Didn't find all of the expected file names: " + expectedFileNames,0,expectedFileNames.size()); }

UtilityVerifier BooleanVerifier HybridVerifier 
// Writes bogus version 7777 into _masterindex and expects initialize() to
// throw IOException; the empty catch is the expected-failure path.
// NOTE(review): the leading Thread.sleep(1000) presumably separates mtimes
// from a previous test — a potential source of slowness/flakiness; confirm.
@Test public void testNegativeInitWithAnUnsupportedVersion() throws Exception { Thread.sleep(1000); writeVersionToMasterIndexImpl(7777,new Path(harPath,"_masterindex")); final HarFileSystem hfs=new HarFileSystem(localFileSystem); assertFalse(hfs.getMetadata() == harFileSystem.getMetadata()); final URI uri=new URI("har://" + harPath.toString()); try { hfs.initialize(uri,new Configuration()); Assert.fail("IOException expected."); } catch ( IOException ioe) { } }

Class: org.apache.hadoop.fs.TestHardLink

TestInitializer BooleanVerifier HybridVerifier 
// @Before fixture: asserts the previous test's cleanup ran (dirs absent),
// then recreates src/tgt_one/tgt_mult and seeds x1..x3 with str1..str3.
/** * Initialize clean environment for start of each test */ @Before public void setupDirs() throws IOException { assertFalse(src.exists()); assertFalse(tgt_one.exists()); assertFalse(tgt_mult.exists()); src.mkdirs(); tgt_one.mkdirs(); tgt_mult.mkdirs(); makeNonEmptyFile(x1,str1); makeNonEmptyFile(x2,str2); makeNonEmptyFile(x3,str3); validateSetup(); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Pins the exact shape of the HardLinkCGWin command-line templates (array
// lengths and the "%f" / "\\%f" / "hardlink" tokens) so accidental edits to
// the Windows hardlink command arrays are caught at test time.
@Test public void testWindowsSyntax(){ class win extends HardLinkCGWin { } ; assertEquals(5,win.hardLinkCommand.length); assertEquals(7,win.hardLinkMultPrefix.length); assertEquals(7,win.hardLinkMultSuffix.length); assertEquals(4,win.getLinkCountCommand.length); assertTrue(win.hardLinkMultPrefix[4].equals("%f")); assertEquals(2,("%f").length()); assertTrue(win.hardLinkMultDir.equals("\\%f")); assertEquals(3,("\\%f").length()); assertTrue(win.getLinkCountCommand[1].equals("hardlink")); assertEquals(4,("-c%h").length()); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Creates hard links one at a time and checks getLinkCount() climbs 2 -> 3 on
// x1; then appends via one link (x1_one) and asserts the content change is
// visible through every alias of the same inode.
/** * Test the single-file method HardLink.createHardLink(). * Also tests getLinkCount() with values greater than one. */ @Test public void testCreateHardLink() throws IOException { createHardLink(x1,x1_one); assertTrue(x1_one.exists()); assertEquals(2,getLinkCount(x1)); assertEquals(2,getLinkCount(x1_one)); assertEquals(1,getLinkCount(x2)); createHardLink(x2,y_one); createHardLink(x3,x3_one); assertEquals(2,getLinkCount(x2)); assertEquals(2,getLinkCount(x3)); createHardLink(x1,x11_one); assertEquals(3,getLinkCount(x1)); assertEquals(3,getLinkCount(x1_one)); assertEquals(3,getLinkCount(x11_one)); validateTgtOne(); appendToFile(x1_one,str3); assertTrue(fetchFileContents(x1_one).equals(str1 + str3)); assertTrue(fetchFileContents(x11_one).equals(str1 + str3)); assertTrue(fetchFileContents(x1).equals(str1 + str3)); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Bulk-links every file in src into tgt_mult; each source and its link should
// then report a link count of 2, and an append through x1_mult must be
// visible through x1 (same inode).
@Test public void testCreateHardLinkMult() throws IOException { String[] fileNames=src.list(); createHardLinkMult(src,fileNames,tgt_mult); assertEquals(2,getLinkCount(x1)); assertEquals(2,getLinkCount(x2)); assertEquals(2,getLinkCount(x3)); assertEquals(2,getLinkCount(x1_mult)); assertEquals(2,getLinkCount(x2_mult)); assertEquals(2,getLinkCount(x3_mult)); validateTgtMult(); appendToFile(x1_mult,str3); assertTrue(fetchFileContents(x1_mult).equals(str1 + str3)); assertTrue(fetchFileContents(x1).equals(str1 + str3)); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Simulates an over-long command line with an artificially small maxLength:
// 2.5 name-widths of headroom should force 2 exec chunks, 0.5 should force 3
// (one per file). Files are renamed to equal-length names first so the
// chunking arithmetic is uniform.
/** * Test createHardLinkMult(), again, this time with the "too long list" * case where the total size of the command line arguments exceed the * allowed maximum. In this case, the list should be automatically * broken up into chunks, each chunk no larger than the max allowed. * We use an extended version of the method call, specifying the * size limit explicitly, to simulate the "too long" list with a * relatively short list. */ @Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException { String name1="x11111111"; String name2="x22222222"; String name3="x33333333"; File x1_long=new File(src,name1); File x2_long=new File(src,name2); File x3_long=new File(src,name3); x1.renameTo(x1_long); x2.renameTo(x2_long); x3.renameTo(x3_long); assertTrue(x1_long.exists()); assertTrue(x2_long.exists()); assertTrue(x3_long.exists()); assertFalse(x1.exists()); assertFalse(x2.exists()); assertFalse(x3.exists()); int callCount; String[] emptyList={}; String[] fileNames=src.list(); int overhead=getLinkMultArgLength(src,emptyList,tgt_mult); int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length())); callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength); assertEquals(2,callCount); String[] tgt_multNames=tgt_mult.list(); Arrays.sort(fileNames); Arrays.sort(tgt_multNames); assertArrayEquals(fileNames,tgt_multNames); FileUtil.fullyDelete(tgt_mult); assertFalse(tgt_mult.exists()); tgt_mult.mkdirs(); assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0); maxLength=overhead + (int)(0.5 * (float)(1 + name1.length())); callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength); assertEquals(3,callCount); tgt_multNames=tgt_mult.list(); Arrays.sort(fileNames); Arrays.sort(tgt_multNames); assertArrayEquals(fileNames,tgt_multNames); }

Class: org.apache.hadoop.fs.TestListFiles

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listFiles() on a single file must yield exactly one LocatedFileStatus with
// the right length/path/block count, identically for recursive=true and false.
/** * Test when input path is a file */ @Test public void testFile() throws IOException { fs.mkdirs(TEST_DIR); writeFile(fs,FILE1,FILE_LEN); RemoteIterator itor=fs.listFiles(FILE1,true); LocatedFileStatus stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); itor=fs.listFiles(FILE1,false); stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); fs.delete(FILE1,true); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listFiles() on directories: empty dir yields nothing; with FILE2 present,
// recursive and non-recursive both find it. With three files, the recursive
// listing over TEST_DIR is checked order-independently via a removal set,
// while the non-recursive listing must return only the direct child FILE1.
/** * Test when input path is a directory */ @Test public void testDirectory() throws IOException { fs.mkdirs(DIR1); RemoteIterator itor=fs.listFiles(DIR1,true); assertFalse(itor.hasNext()); itor=fs.listFiles(DIR1,false); assertFalse(itor.hasNext()); writeFile(fs,FILE2,FILE_LEN); itor=fs.listFiles(DIR1,true); LocatedFileStatus stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); itor=fs.listFiles(DIR1,false); stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); writeFile(fs,FILE1,FILE_LEN); writeFile(fs,FILE3,FILE_LEN); Set filesToFind=new HashSet(); filesToFind.add(fs.makeQualified(FILE1)); filesToFind.add(fs.makeQualified(FILE2)); filesToFind.add(fs.makeQualified(FILE3)); itor=fs.listFiles(TEST_DIR,true); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); assertFalse(itor.hasNext()); assertTrue(filesToFind.isEmpty()); itor=fs.listFiles(TEST_DIR,false); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); fs.delete(TEST_DIR,true); }

Class: org.apache.hadoop.fs.TestLocalDirAllocator

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String, Configuration)}
 * returns the file from both configured buffer dirs with the "file" scheme,
 * and that the returned Iterable's iterator honors the Iterator contract:
 * next() past the end throws NoSuchElementException and remove() throws
 * UnsupportedOperationException. Skipped on Windows.
 *
 * Fix over previous version: the idiom assertFalse(msg, true) /
 * assertFalse(true) inside the expected-exception try blocks is replaced with
 * the standard fail(...), which states intent directly.
 *
 * @throws IOException
 */
@Test(timeout=30000)
public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));

    // The allocator must surface the file once per configured local dir.
    final Iterable pathIterable =
        dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    int count = 0;
    for (final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME, p.getName());
      assertEquals("file", p.getFileSystem(conf).getUri().getScheme());
    }
    assertEquals(2, count);

    // The iterator above is exhausted; another next() must throw.
    try {
      Path p = pathIterable.iterator().next();
      fail("NoSuchElementException must be thrown, but returned [" + p
          + "] instead.");
    } catch (NoSuchElementException nsee) {
      // expected
    }

    // The iterator must be read-only.
    final Iterable pathIterable2 =
        dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    final Iterator it = pathIterable2.iterator();
    try {
      it.remove();
      fail("UnsupportedOperationException must be thrown by remove()");
    } catch (UnsupportedOperationException uoe) {
      // expected
    }
  } finally {
    // Restore write permission so cleanup can delete the buffer root.
    Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
// After createTmpFileForWrite, deleting the temp file and its parent must
// leave nothing behind under dir — guards against File objects accidentally
// built from fully-qualified path strings. Skipped on Windows.
/** * Test no side effect files are left over. After creating a temp * temp file, remove both the temp file and its parent. Verify that * no files or directories are left over as can happen when File objects * are mistakenly created from fully qualified path strings. * @throws IOException */ @Test(timeout=30000) public void testNoSideEffects() throws IOException { assumeTrue(!isWindows); String dir=buildBufferDir(ROOT,0); try { conf.set(CONTEXT,dir); File result=dirAllocator.createTmpFileForWrite(FILENAME,-1,conf); assertTrue(result.delete()); assertTrue(result.getParentFile().delete()); assertFalse(new File(dir).exists()); } finally { Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT)); rmBufferDirs(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// getLocalPathToRead() must resolve a file just written via
// createTmpFileForWrite to the same name and a "file"-scheme filesystem.
// Skipped on Windows.
/** * Test getLocalPathToRead() returns correct filename and "file" schema. * @throws IOException */ @Test(timeout=30000) public void testGetLocalPathToRead() throws IOException { assumeTrue(!isWindows); String dir=buildBufferDir(ROOT,0); try { conf.set(CONTEXT,dir); assertTrue(localFs.mkdirs(new Path(dir))); File f1=dirAllocator.createTmpFileForWrite(FILENAME,SMALL_FILE_SIZE,conf); Path p1=dirAllocator.getLocalPathToRead(f1.getName(),conf); assertEquals(f1.getName(),p1.getName()); assertEquals("file",p1.getFileSystem(conf).getUri().getScheme()); } finally { Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT)); rmBufferDirs(); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// With CONTEXT unset, getLocalPathForWrite must fail with a descriptive
// IOException ("<CONTEXT> not configured"), never an NPE.
// NOTE(review): method name has a typo ("Notthrow"); left as-is since
// renaming changes the test's public identifier.
@Test(timeout=30000) public void testShouldNotthrowNPE() throws Exception { Configuration conf1=new Configuration(); try { dirAllocator.getLocalPathForWrite("/test",conf1); fail("Exception not thrown when " + CONTEXT + " is not set"); } catch ( IOException e) { assertEquals(CONTEXT + " not configured",e.getMessage()); } catch ( NullPointerException e) { fail("Lack of configuration should not have thrown an NPE."); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * the second dir exists & is RW.
 * getLocalPathForWrite with checkWrite (the trailing boolean) true — the
 * default — should create the parent directory; with checkWrite false the
 * directory should NOT be created, so a later getFileStatus on it must throw
 * FileNotFoundException.
 *
 * Fixes over previous version: the expected-exception check used a broad
 * catch (Exception) with assertEquals(e.getClass(), FileNotFoundException.class)
 * — arguments reversed from JUnit's (expected, actual) order — and, worse,
 * the test silently passed if getFileStatus threw nothing. Now fail() is
 * called when no exception occurs and only FileNotFoundException is caught;
 * any other exception type propagates and fails the test.
 *
 * @throws IOException
 */
@Test(timeout=30000)
public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();

    // Default path: parent dir must be created.
    Path p1 = dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());

    // checkWrite=false: parent dir must NOT be created.
    Path p2 =
        dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf, false);
    try {
      localFs.getFileStatus(p2.getParent());
      fail("Expected FileNotFoundException for uncreated parent "
          + p2.getParent());
    } catch (FileNotFoundException expected) {
      // expected: the parent directory was not created
    }
  } finally {
    Shell.execCommand(
        Shell.getSetPermissionCommand("u+w", false, BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}

Class: org.apache.hadoop.fs.TestLocalFileSystem

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// A directory named "foo:bar" (colon in the name) must still be listable and
// stat to the right absolute path. Skipped on Windows, where ':' is reserved.
@Test(timeout=1000) public void testListStatusWithColons() throws IOException { assumeTrue(!Shell.WINDOWS); File colonFile=new File(TEST_ROOT_DIR,"foo:bar"); colonFile.mkdirs(); FileStatus[] stats=fileSys.listStatus(new Path(TEST_ROOT_DIR)); assertEquals("Unexpected number of stats",1,stats.length); assertEquals("Bad path from stat",colonFile.getAbsolutePath(),stats[0].getPath().toUri().getPath()); }

TestCleaner BooleanVerifier HybridVerifier 
// @After fixture: restore write permission (some tests make base read-only),
// delete the tree, and verify the deletion actually succeeded.
@After public void after() throws IOException { FileUtil.setWritable(base,true); FileUtil.fullyDelete(base); assertTrue(!base.exists()); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Creates a random-sized file, checks getFileStatus length and
// getContentSummary agree, then asserts mkdirs fails with
// ParentNotDirectoryException when a path component is a file, and with
// IllegalArgumentException for a null argument.
@Test(timeout=1000) public void testCreateFileAndMkdirs() throws IOException { Path test_dir=new Path(TEST_ROOT_DIR,"test_dir"); Path test_file=new Path(test_dir,"file1"); assertTrue(fileSys.mkdirs(test_dir)); final int fileSize=new Random().nextInt(1 << 20) + 1; writeFile(fileSys,test_file,fileSize); { final FileStatus status=fileSys.getFileStatus(test_file); Assert.assertEquals(fileSize,status.getLen()); final ContentSummary summary=fileSys.getContentSummary(test_dir); Assert.assertEquals(fileSize,summary.getLength()); } Path bad_dir=new Path(test_file,"another_dir"); try { fileSys.mkdirs(bad_dir); fail("Failed to detect existing file in path"); } catch ( ParentNotDirectoryException e) { } try { fileSys.mkdirs(null); fail("Failed to detect null in mkdir arg"); } catch ( IllegalArgumentException e) { } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// FileUtil.copy matrix: (deleteSource, overwrite) combinations between src
// and dst, copy of a file into a directory, and finally overwrite=true onto
// an existing directory, which must raise IOException.
@Test(timeout=10000) public void testCopy() throws IOException { Path src=new Path(TEST_ROOT_DIR,"dingo"); Path dst=new Path(TEST_ROOT_DIR,"yak"); writeFile(fileSys,src,1); assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,false,conf)); assertTrue(!fileSys.exists(src) && fileSys.exists(dst)); assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf)); assertTrue(fileSys.exists(src) && fileSys.exists(dst)); assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,true,conf)); assertTrue(!fileSys.exists(src) && fileSys.exists(dst)); fileSys.mkdirs(src); assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf)); Path tmp=new Path(src,dst.getName()); assertTrue(fileSys.exists(tmp) && fileSys.exists(dst)); assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,true,conf)); assertTrue(fileSys.delete(tmp,true)); fileSys.mkdirs(tmp); try { FileUtil.copy(fileSys,dst,fileSys,src,true,true,conf); fail("Failed to detect existing dir"); } catch ( IOException e) { } }

APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// Windows-only: listing a path given WITHOUT a drive spec (leading "X:"
// stripped) must still return stat paths consistent with the input form.
@Test public void testListStatusReturnConsistentPathOnWindows() throws IOException { assumeTrue(Shell.WINDOWS); String dirNoDriveSpec=TEST_ROOT_DIR; if (dirNoDriveSpec.charAt(1) == ':') dirNoDriveSpec=dirNoDriveSpec.substring(2); File file=new File(dirNoDriveSpec,"foo"); file.mkdirs(); FileStatus[] stats=fileSys.listStatus(new Path(dirNoDriveSpec)); assertEquals("Unexpected number of stats",1,stats.length); assertEquals("Bad path from stat",new Path(file.getPath()).toUri().getPath(),stats[0].getPath().toUri().getPath()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// After setWorkingDirectory(subdir), relative paths ("dir1", "file1",
// "sub/file2") must resolve under subdir for mkdirs/copy/rename/open; the
// original working directory is restored in the finally block.
/** * Test the capability of setting the working directory. */ @Test(timeout=10000) public void testWorkingDirectory() throws IOException { Path origDir=fileSys.getWorkingDirectory(); Path subdir=new Path(TEST_ROOT_DIR,"new"); try { assertTrue(!fileSys.exists(subdir)); assertTrue(fileSys.mkdirs(subdir)); assertTrue(fileSys.isDirectory(subdir)); fileSys.setWorkingDirectory(subdir); Path dir1=new Path("dir1"); assertTrue(fileSys.mkdirs(dir1)); assertTrue(fileSys.isDirectory(dir1)); fileSys.delete(dir1,true); assertTrue(!fileSys.exists(dir1)); Path file1=new Path("file1"); Path file2=new Path("sub/file2"); String contents=writeFile(fileSys,file1,1); fileSys.copyFromLocalFile(file1,file2); assertTrue(fileSys.exists(file1)); assertTrue(fileSys.isFile(file1)); cleanupFile(fileSys,file2); fileSys.copyToLocalFile(file1,file2); cleanupFile(fileSys,file2); fileSys.rename(file1,file2); assertTrue(!fileSys.exists(file1)); assertTrue(fileSys.exists(file2)); fileSys.rename(file2,file1); InputStream stm=fileSys.open(file1); byte[] buffer=new byte[3]; int bytesRead=stm.read(buffer,0,3); assertEquals(contents,new String(buffer,0,bytesRead)); stm.close(); } finally { fileSys.setWorkingDirectory(origDir); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// setTimes(path, mtime, -1) must update only the modification time; the -1
// access-time argument means "leave unchanged", verified against the value
// captured before the call.
// NOTE(review): timeout=1000 ms is tight for filesystem I/O — possible flake.
@Test(timeout=1000) public void testSetTimes() throws Exception { Path path=new Path(TEST_ROOT_DIR,"set-times"); writeFile(fileSys,path,1); long newModTime=12345000; FileStatus status=fileSys.getFileStatus(path); assertTrue("check we're actually changing something",newModTime != status.getModificationTime()); long accessTime=status.getAccessTime(); fileSys.setTimes(path,newModTime,-1); status=fileSys.getFileStatus(path); assertEquals(newModTime,status.getModificationTime()); assertEquals(accessTime,status.getAccessTime()); }

Class: org.apache.hadoop.fs.TestPath

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips URIs with fragments ("#baz") through Path and makeQualified,
// then checks fragment and path handling when composing a parent Path with
// relative vs. absolute child URIs.
@Test(timeout=30000) public void testURI() throws URISyntaxException, IOException { URI uri=new URI("file:///bar#baz"); Path path=new Path(uri); assertTrue(uri.equals(new URI(path.toString()))); FileSystem fs=path.getFileSystem(new Configuration()); assertTrue(uri.equals(new URI(fs.makeQualified(path).toString()))); URI uri2=new URI("file:///bar/baz"); assertTrue(uri2.equals(new URI(fs.makeQualified(new Path(uri2)).toString()))); assertEquals("foo://bar/baz#boo",new Path("foo://bar/",new Path(new URI("/baz#boo"))).toString()); assertEquals("foo://bar/baz/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("fud#boo"))).toString()); assertEquals("foo://bar/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("/fud#boo"))).toString()); }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Creates files whose names contain a literal '*' and checks globStatus:
// unescaped "*" expands as a wildcard, while "\\*" matches only the literal
// '*' entry. Early-returns (effectively skips) on Windows, where such file
// names are not usable.
@Test(timeout=30000) public void testGlobEscapeStatus() throws Exception { if (Shell.WINDOWS) return; FileSystem lfs=FileSystem.getLocal(new Configuration()); Path testRoot=lfs.makeQualified(new Path(System.getProperty("test.build.data","test/build/data"),"testPathGlob")); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); assertTrue(lfs.isDirectory(testRoot)); lfs.setWorkingDirectory(testRoot); Path paths[]=new Path[]{new Path(testRoot,"*/f"),new Path(testRoot,"d1/f"),new Path(testRoot,"d2/f")}; Arrays.sort(paths); for ( Path p : paths) { lfs.create(p).close(); assertTrue(lfs.exists(p)); } FileStatus stats[]=lfs.listStatus(new Path(testRoot,"*")); assertEquals(1,stats.length); assertEquals(new Path(testRoot,"*/f"),stats[0].getPath()); stats=lfs.globStatus(new Path(testRoot,"*")); Arrays.sort(stats); Path parentPaths[]=new Path[paths.length]; for (int i=0; i < paths.length; i++) { parentPaths[i]=paths[i].getParent(); } assertEquals(mergeStatuses(parentPaths),mergeStatuses(stats)); stats=lfs.globStatus(new Path(testRoot,"\\*")); assertEquals(1,stats.length); assertEquals(new Path(testRoot,"*"),stats[0].getPath()); stats=lfs.globStatus(new Path(testRoot,"*/f")); assertEquals(paths.length,stats.length); assertEquals(mergeStatuses(paths),mergeStatuses(stats)); stats=lfs.globStatus(new Path(testRoot,"\\*/f")); assertEquals(1,stats.length); assertEquals(new Path(testRoot,"*/f"),stats[0].getPath()); stats=lfs.globStatus(new Path(testRoot,"\\*/*")); assertEquals(1,stats.length); assertEquals(new Path(testRoot,"*/f"),stats[0].getPath()); }

Class: org.apache.hadoop.fs.TestResolveHdfsSymlink

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Creates an HDFS symlink pointing at a local-fs directory, then checks
// resolveAbstractFileSystems on a path through the link returns exactly the
// two AFS instances involved: the HDFS default FS and the local default FS.
/** * Tests resolution of an hdfs symlink to the local file system. * @throws IOException * @throws InterruptedException */ @Test public void testFcResolveAfs() throws IOException, InterruptedException { Configuration conf=new Configuration(); FileContext fcLocal=FileContext.getLocalFSFileContext(); FileContext fcHdfs=FileContext.getFileContext(cluster.getFileSystem().getUri()); final String localTestRoot=helper.getAbsoluteTestRootDir(fcLocal); Path alphaLocalPath=new Path(fcLocal.getDefaultFileSystem().getUri().toString(),new File(localTestRoot,"alpha").getAbsolutePath()); DFSTestUtil.createFile(FileSystem.getLocal(conf),alphaLocalPath,16,(short)1,2); Path linkTarget=new Path(fcLocal.getDefaultFileSystem().getUri().toString(),localTestRoot); Path hdfsLink=new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),"/tmp/link"); fcHdfs.createSymlink(linkTarget,hdfsLink,true); Path alphaHdfsPathViaLink=new Path(fcHdfs.getDefaultFileSystem().getUri().toString() + "/tmp/link/alpha"); Set afsList=fcHdfs.resolveAbstractFileSystems(alphaHdfsPathViaLink); Assert.assertEquals(2,afsList.size()); for ( AbstractFileSystem afs : afsList) { if ((!afs.equals(fcHdfs.getDefaultFileSystem())) && (!afs.equals(fcLocal.getDefaultFileSystem()))) { Assert.fail("Failed to resolve AFS correctly"); } } }

Class: org.apache.hadoop.fs.TestStat

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
// Stat on a real directory (sub1) reports isDirectory; stat on a symlink to
// it (sub2) does not — i.e. the link itself is not a directory. Skipped when
// the native stat command is unavailable.
// NOTE(review): "4096l" uses a lowercase long suffix — easy to misread as
// 40961; prefer 4096L if this line is ever touched.
@Test(timeout=10000) public void testStat() throws Exception { Assume.assumeTrue(Stat.isAvailable()); FileSystem fs=FileSystem.getLocal(new Configuration()); Path testDir=new Path(getTestRootPath(fs),"teststat"); fs.mkdirs(testDir); Path sub1=new Path(testDir,"sub1"); Path sub2=new Path(testDir,"sub2"); fs.mkdirs(sub1); fs.createSymlink(sub1,sub2,false); FileStatus stat1=new Stat(sub1,4096l,false,fs).getFileStatus(); FileStatus stat2=new Stat(sub2,0,false,fs).getFileStatus(); assertTrue(stat1.isDirectory()); assertFalse(stat2.isDirectory()); fs.delete(testDir,true); }

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * getFileStatus() on a path that does not exist must raise
 * FileNotFoundException. Skipped when the native stat command is unavailable.
 */
@Test(timeout=10000)
public void testStatFileNotFound() throws Exception {
  Assume.assumeTrue(Stat.isAvailable());
  try {
    stat.getFileStatus();
    fail("Expected FileNotFoundException");
  } catch (FileNotFoundException expected) {
    // expected: the backing path does not exist
  }
}

Class: org.apache.hadoop.fs.TestSymlinkHdfs

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Builds a link name sized so dir + name hits exactly
// HdfsConstants.MAX_PATH_LENGTH: creating/reading at the limit must work,
// and one extra character ("x") must make createSymlink fail with IOException.
@Test(timeout=10000) public void testCreateLinkMaxPathLink() throws IOException { Path dir=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); final int maxPathLen=HdfsConstants.MAX_PATH_LENGTH; final int dirLen=dir.toString().length() + 1; int len=maxPathLen - dirLen; StringBuilder sb=new StringBuilder(""); for (int i=0; i < (len / 10); i++) { sb.append("0123456789"); } for (int i=0; i < (len % 10); i++) { sb.append("x"); } Path link=new Path(sb.toString()); assertEquals(maxPathLen,dirLen + link.toString().length()); createAndWriteFile(file); wrapper.setWorkingDirectory(dir); wrapper.createSymlink(file,link,false); readFile(link); link=new Path(sb.toString() + "x"); try { wrapper.createSymlink(file,link,false); fail("Path name should be too long"); } catch ( IOException x) { } }

Class: org.apache.hadoop.fs.TestSymlinkLocalFS

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// A symlink created with a fully-qualified target keeps that target after the
// containing dir is renamed (so the link dangles and reads fail with
// FileNotFoundException); also checks that creating a local-fs symlink to an
// hdfs:// target is rejected with IOException.
@Test(timeout=1000) public void testGetLinkStatusPartQualTarget() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path fileAbs=new Path(testBaseDir1() + "/file"); Path fileQual=new Path(testURI().toString(),fileAbs); Path dir=new Path(testBaseDir1()); Path link=new Path(testBaseDir1() + "/linkToFile"); Path dirNew=new Path(testBaseDir2()); Path linkNew=new Path(testBaseDir2() + "/linkToFile"); wrapper.delete(dirNew,true); createAndWriteFile(fileQual); wrapper.setWorkingDirectory(dir); wrapper.createSymlink(fileQual,link,false); assertEquals(fileQual,wrapper.getFileLinkStatus(link).getSymlink()); wrapper.rename(dir,dirNew); assertEquals(fileQual,wrapper.getFileLinkStatus(linkNew).getSymlink()); try { readFile(linkNew); fail("The link should be dangling now."); } catch ( FileNotFoundException x) { } Path anotherFs=new Path("hdfs://host:1000/dir/file"); FileUtil.fullyDelete(new File(linkNew.toString())); try { wrapper.createSymlink(anotherFs,linkNew,false); fail("Created a local fs link to a non-local fs"); } catch ( IOException x) { } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// Dangling symlink semantics: getFileStatus must throw FileNotFoundException,
// while getFileLinkStatus must still succeed and report the link's own
// metadata (target, isSymlink, owner/group, qualified path). Once the target
// is created, getFileStatus succeeds. Skipped on Windows.
@Test(timeout=1000) public void testDanglingLink() throws IOException { assumeTrue(!Path.WINDOWS); Path fileAbs=new Path(testBaseDir1() + "/file"); Path fileQual=new Path(testURI().toString(),fileAbs); Path link=new Path(testBaseDir1() + "/linkToFile"); Path linkQual=new Path(testURI().toString(),link.toString()); wrapper.createSymlink(fileAbs,link,false); FileUtil.fullyDelete(new File(link.toUri().getPath())); wrapper.createSymlink(fileAbs,link,false); try { wrapper.getFileStatus(link); fail("Got FileStatus for dangling link"); } catch ( FileNotFoundException f) { } UserGroupInformation user=UserGroupInformation.getCurrentUser(); FileStatus fsd=wrapper.getFileLinkStatus(link); assertEquals(fileQual,fsd.getSymlink()); assertTrue(fsd.isSymlink()); assertFalse(fsd.isDirectory()); assertEquals(user.getUserName(),fsd.getOwner()); assertEquals(user.getGroupNames()[0],fsd.getGroup()); assertEquals(linkQual,fsd.getPath()); try { readFile(link); fail("Got FileStatus for dangling link"); } catch ( FileNotFoundException f) { } createAndWriteFile(fileAbs); wrapper.getFileStatus(link); }

Class: org.apache.hadoop.fs.TestSymlinkLocalFSFileSystem

UtilityVerifier BooleanVerifier HybridVerifier 
@Override
@Test(timeout=10000)
public void testRenameSymlinkToItself() throws IOException {
  Path file = new Path(testBaseDir1(), "file");
  createAndWriteFile(file);
  Path link = new Path(testBaseDir1(), "linkToFile1");
  wrapper.createSymlink(file, link, false);
  // Renaming a link onto itself must be rejected outright.
  try {
    wrapper.rename(link, link);
    fail("Failed to get expected IOException");
  } catch (IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // Even with OVERWRITE the rename must fail; either wrapped cause is
  // acceptable here.
  try {
    wrapper.rename(link, link, Rename.OVERWRITE);
    fail("Failed to get expected IOException");
  } catch (IOException e) {
    Throwable cause = unwrapException(e);
    assertTrue(cause instanceof FileAlreadyExistsException
        || cause instanceof FileNotFoundException);
  }
}

Class: org.apache.hadoop.fs.TestUrlStreamHandler

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 *
 * @throws IOException
 */
@Test
public void testDfsUrls() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();

  // NOTE(review): the JVM permits installing a URLStreamHandlerFactory only
  // once per process, so this test cannot share a JVM with another test
  // that installs one.
  FsUrlStreamHandlerFactory factory = new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
  java.net.URL.setURLStreamHandlerFactory(factory);

  Path filePath = new Path("/thefile");
  try {
    // Write 1024 deterministic bytes through the FileSystem API.
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i) {
      fileContent[i] = (byte) i;
    }
    OutputStream os = fs.create(filePath);
    os.write(fileContent);
    os.close();

    // Open and read the file back through the URL stream API.
    URI uri = fs.getUri();
    URL fileURL = new URL(uri.getScheme(), uri.getHost(), uri.getPort(),
        filePath.toString());
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    try {
      // FIX: a single read() is not guaranteed to return the whole file;
      // loop until EOF and assert on the total instead.
      byte[] bytes = new byte[4096];
      int total = 0;
      int n;
      while ((n = is.read(bytes, total, bytes.length - total)) > 0) {
        total += n;
      }
      assertEquals(1024, total);
      for (int i = 0; i < fileContent.length; ++i) {
        assertEquals(fileContent[i], bytes[i]);
      }
    } finally {
      // FIX: close the stream even when an assertion fails.
      is.close();
    }
    fs.delete(filePath, false);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}


APIUtilityVerifier IterativeVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test opening and reading from an InputStream through a file:// URL.
 *
 * @throws IOException
 * @throws URISyntaxException
 */
@Test
public void testFileUrls() throws IOException, URISyntaxException {
  Configuration conf = new HdfsConfiguration();
  if (!TEST_ROOT_DIR.exists()) {
    if (!TEST_ROOT_DIR.mkdirs())
      throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
  }
  File tmpFile = new File(TEST_ROOT_DIR, "thefile");
  URI uri = tmpFile.toURI();
  FileSystem fs = FileSystem.get(uri, conf);
  try {
    // Write 1024 deterministic bytes through the FileSystem API.
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i)
      fileContent[i] = (byte) i;
    OutputStream os = fs.create(new Path(uri.getPath()));
    os.write(fileContent);
    os.close();
    // Read the file back through the URL stream API.
    URL fileURL = uri.toURL();
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    try {
      // FIX: a single read() may legally return fewer than 1024 bytes;
      // loop to EOF and assert on the total instead.
      byte[] bytes = new byte[4096];
      int total = 0;
      int n;
      while ((n = is.read(bytes, total, bytes.length - total)) > 0) {
        total += n;
      }
      assertEquals(1024, total);
      for (int i = 0; i < fileContent.length; ++i)
        assertEquals(fileContent[i], bytes[i]);
    } finally {
      // FIX: close the stream even when an assertion fails.
      is.close();
    }
    fs.delete(new Path(uri.getPath()), false);
  } finally {
    fs.close();
  }
}

Class: org.apache.hadoop.fs.TestXAttr

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test
public void testXAttrEquals() {
  // The fixture attributes are distinct instances...
  assertNotSame(XATTR1, XATTR2);
  assertNotSame(XATTR2, XATTR3);
  assertNotSame(XATTR3, XATTR4);
  assertNotSame(XATTR4, XATTR5);
  // ...each equal to itself and to an equivalent value...
  assertEquals(XATTR, XATTR1);
  assertEquals(XATTR1, XATTR1);
  assertEquals(XATTR2, XATTR2);
  assertEquals(XATTR3, XATTR3);
  assertEquals(XATTR4, XATTR4);
  assertEquals(XATTR5, XATTR5);
  // ...and unequal to each neighboring value.
  assertFalse(XATTR1.equals(XATTR2));
  assertFalse(XATTR2.equals(XATTR3));
  assertFalse(XATTR3.equals(XATTR4));
  assertFalse(XATTR4.equals(XATTR5));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testXAttrHashCode() {
  // Equal attributes must hash equal...
  assertEquals(XATTR.hashCode(), XATTR1.hashCode());
  // ...and these known-distinct fixtures are expected to hash differently.
  assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
  assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
  assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
  assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
}

Class: org.apache.hadoop.fs.azure.NativeAzureFileSystemBaseTest

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetOwnerOnFolder() throws Exception {
  // Create a directory and hand it to a different owner.
  Path folder = new Path("testOwner");
  assertTrue(fs.mkdirs(folder));
  fs.setOwner(folder, "newUser", null);
  // The ownership change must be visible through getFileStatus.
  FileStatus status = fs.getFileStatus(folder);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertTrue(status.isDirectory());
}

BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testReadingDirectoryAsFile() throws Exception {
  Path dir = new Path("/x");
  assertTrue(fs.mkdirs(dir));
  // Opening a directory for read must fail with a descriptive message.
  try {
    fs.open(dir).close();
    // FIX: use fail() instead of the assertTrue(message, false) anti-idiom.
    fail("Should've thrown");
  } catch (FileNotFoundException ex) {
    assertEquals("/x is a directory not a file.", ex.getMessage());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCreatingFileOverDirectory() throws Exception {
  Path dir = new Path("/x");
  assertTrue(fs.mkdirs(dir));
  // Creating a file over an existing directory must be rejected.
  try {
    fs.create(dir).close();
    // FIX: use fail() instead of the assertTrue(message, false) anti-idiom.
    fail("Should've thrown");
  } catch (IOException ex) {
    assertEquals("Cannot create file /x; already exists as a directory.",
        ex.getMessage());
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testListSlash() throws Exception {
  Path folder = new Path("/testFolder");
  Path file = new Path(folder, "testFile");
  assertTrue(fs.mkdirs(folder));
  assertTrue(fs.createNewFile(file));
  // A trailing "/." must resolve to the folder itself.
  FileStatus status = fs.getFileStatus(new Path("/testFolder/."));
  assertNotNull(status);
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
  // Names made entirely of URI-reserved/special characters.
  String fileName = "!#$'()*;=[]%";
  String directoryName = "*;=[]%!#$'()";
  Path filePath = new Path(directoryName, fileName);
  fs.create(filePath).close();
  // Listing, stat, open and delete must all round-trip the raw names.
  FileStatus[] listing = fs.listStatus(new Path(directoryName));
  assertEquals(1, listing.length);
  assertEquals(fileName, listing[0].getPath().getName());
  FileStatus status = fs.getFileStatus(filePath);
  assertEquals(fileName, status.getPath().getName());
  InputStream stream = fs.open(filePath);
  assertNotNull(stream);
  stream.close();
  assertTrue(fs.delete(filePath, true));
  assertTrue(fs.delete(new Path(directoryName), true));
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testRenameImplicitFolder() throws Exception {
  Path testFile = new Path("deep/file/rename/test");
  // FIX: the literal was decimal 644 (== octal 01204, which carries the
  // sticky bit); the intended permission is clearly octal 0644 (rw-r--r--).
  FsPermission permission = FsPermission.createImmutable((short) 0644);
  createEmptyFile(testFile, permission);
  // Renaming an implicit parent folder must carry the whole subtree along.
  assertTrue(fs.rename(new Path("deep/file"), new Path("deep/renamed")));
  assertFalse(fs.exists(testFile));
  FileStatus newStatus = fs.getFileStatus(new Path("deep/renamed/rename/test"));
  assertNotNull(newStatus);
  // The file's permission must survive the rename.
  assertEqualsIgnoreStickyBit(permission, newStatus.getPermission());
  assertTrue(fs.delete(new Path("deep"), true));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testStatistics() throws Exception {
  FileSystem.clearStatistics();
  FileSystem.Statistics stats =
      FileSystem.getStatistics("wasb", NativeAzureFileSystem.class);
  // Fresh statistics start at zero.
  assertEquals(0, stats.getBytesRead());
  assertEquals(0, stats.getBytesWritten());
  // Writing 8 bytes bumps only the write counter...
  Path statsFile = new Path("testStats");
  writeString(statsFile, "12345678");
  assertEquals(8, stats.getBytesWritten());
  assertEquals(0, stats.getBytesRead());
  // ...reading them back bumps only the read counter...
  String roundTripped = readString(statsFile);
  assertEquals("12345678", roundTripped);
  assertEquals(8, stats.getBytesRead());
  assertEquals(8, stats.getBytesWritten());
  // ...and deleting moves neither counter.
  assertTrue(fs.delete(statsFile, true));
  assertEquals(8, stats.getBytesRead());
  assertEquals(8, stats.getBytesWritten());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testStoreDeleteFolder() throws Exception {
  Path folder = new Path("storeDeleteFolder");
  assertFalse(fs.exists(folder));
  // Create the folder and verify its status and default permission.
  assertTrue(fs.mkdirs(folder));
  assertTrue(fs.exists(folder));
  FileStatus folderStatus = fs.getFileStatus(folder);
  assertNotNull(folderStatus);
  assertTrue(folderStatus.isDirectory());
  assertEquals(new FsPermission((short) 0755), folderStatus.getPermission());
  // A recursive delete must remove the folder and its contents.
  Path child = new Path(folder, "innerFile");
  assertTrue(fs.createNewFile(child));
  assertTrue(fs.exists(child));
  assertTrue(fs.delete(folder, true));
  assertFalse(fs.exists(child));
  assertFalse(fs.exists(folder));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testStoreRetrieveFile() throws Exception {
  Path testFile = new Path("unit-test-file");
  writeString(testFile, "Testing");
  assertTrue(fs.exists(testFile));
  // The stored file carries the default file permission.
  FileStatus status = fs.getFileStatus(testFile);
  assertNotNull(status);
  assertEquals(new FsPermission((short) 0644), status.getPermission());
  // The content round-trips.
  assertEquals("Testing", readString(testFile));
  // FIX: assert the delete result, consistent with the sibling tests.
  assertTrue(fs.delete(testFile, true));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testUriEncoding() throws Exception {
  // "%5F" in the name must be kept literally (listStatus asserts the raw
  // name below), and the encoded name must survive rename and delete.
  fs.create(new Path("p/t%5Fe")).close();
  FileStatus[] listing = fs.listStatus(new Path("p"));
  assertEquals(1, listing.length);
  assertEquals("t%5Fe", listing[0].getPath().getName());
  assertTrue(fs.rename(new Path("p"), new Path("q")));
  assertTrue(fs.delete(new Path("q"), true));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCopyFromLocalFileSystem() throws Exception {
  Path localFilePath = new Path(System.getProperty("test.build.data", "azure_test"));
  FileSystem localFs = FileSystem.get(new Configuration());
  localFs.delete(localFilePath, true);
  try {
    // Copy a local file into the Azure FS and verify the content.
    writeString(localFs, localFilePath, "Testing");
    Path dstPath = new Path("copiedFromLocal");
    assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
        fs.getConf()));
    assertTrue(fs.exists(dstPath));
    assertEquals("Testing", readString(fs, dstPath));
    // FIX: assert the delete of the verified copy instead of ignoring it.
    assertTrue(fs.delete(dstPath, true));
  } finally {
    // Best-effort cleanup of the local scratch file.
    localFs.delete(localFilePath, true);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testListDirectory() throws Exception {
  Path rootFolder = new Path("testingList");
  assertTrue(fs.mkdirs(rootFolder));
  // An empty directory lists as empty.
  FileStatus[] entries = fs.listStatus(rootFolder);
  assertEquals(0, entries.length);
  // A nested directory shows up as a single directory entry.
  Path innerFolder = new Path(rootFolder, "inner");
  assertTrue(fs.mkdirs(innerFolder));
  entries = fs.listStatus(rootFolder);
  assertEquals(1, entries.length);
  assertTrue(entries[0].isDirectory());
  // A file inside the nested directory must not leak into the parent list.
  Path innerFile = new Path(innerFolder, "innerFile");
  writeString(innerFile, "testing");
  entries = fs.listStatus(rootFolder);
  assertEquals(1, entries.length);
  assertTrue(entries[0].isDirectory());
  entries = fs.listStatus(innerFolder);
  assertEquals(1, entries.length);
  assertFalse(entries[0].isDirectory());
  assertTrue(fs.delete(rootFolder, true));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetOwnerOnFile() throws Exception {
  Path file = new Path("testOwner");
  OutputStream out = fs.create(file);
  out.write(13);
  out.close();
  // Change only the owner; the group keeps its default value.
  fs.setOwner(file, "newUser", null);
  FileStatus status = fs.getFileStatus(file);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertEquals("supergroup", status.getGroup());
  assertEquals(1, status.getLen());
  // Now change only the group; the owner set above must survive.
  fs.setOwner(file, null, "newGroup");
  status = fs.getFileStatus(file);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertEquals("newGroup", status.getGroup());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetPermissionOnFolder() throws Exception {
  Path folder = new Path("testPermission");
  assertTrue(fs.mkdirs(folder));
  // Tighten the folder to owner-only read/write and verify it sticks.
  FsPermission perm = new FsPermission((short) 0600);
  fs.setPermission(folder, perm);
  FileStatus status = fs.getFileStatus(folder);
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
  assertTrue(status.isDirectory());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetPermissionOnFile() throws Exception {
  Path file = new Path("testPermission");
  OutputStream out = fs.create(file);
  out.write(13);
  out.close();
  // Apply owner-only rwx and verify it round-trips while owner, group and
  // length stay untouched.
  FsPermission perm = new FsPermission((short) 0700);
  fs.setPermission(file, perm);
  FileStatus status = fs.getFileStatus(file);
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
  assertEquals("supergroup", status.getGroup());
  assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
      status.getOwner());
  assertEquals(1, status.getLen());
}

Class: org.apache.hadoop.fs.azure.TestAzureConcurrentOutOfBandIo

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test
public void testReadOOBWrites() throws Exception {
  byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
  byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
  // Seed the blob with NUMBER_OF_BLOCKS blocks of 0xFF.
  DataOutputStream outputStream = testAccount.getStore().storefile(
      "WASB_String.txt", new PermissionStatus("", "", FsPermission.getDefault()));
  Arrays.fill(dataBlockWrite, (byte) 255);
  for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
    outputStream.write(dataBlockWrite);
  }
  outputStream.flush();
  outputStream.close();
  // Start an out-of-band writer that keeps updating the same blob.
  DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
      "WASB_String.txt");
  writeBlockTask.startWriting();
  // Read the whole blob several times while the OOB writes are in flight.
  int count = 0;
  DataInputStream inputStream = null;
  for (int i = 0; i < 5; i++) {
    try {
      inputStream = testAccount.getStore().retrieve("WASB_String.txt", 0);
      count = 0;
      int c = 0;
      while (c >= 0) {
        c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
        if (c < 0) {
          break;
        }
        count += c;
      }
    } catch (IOException e) {
      // FIX: e.getCause() may be null, so don't dereference it while
      // logging; keep the exception text in the failure message instead of
      // a bare fail().
      e.printStackTrace();
      fail("Unexpected exception while reading: " + e);
    }
    if (null != inputStream) {
      inputStream.close();
    }
  }
  writeBlockTask.stopWriting();
  // The last full read must have seen the complete seeded content.
  assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
}

Class: org.apache.hadoop.fs.azure.TestBlobMetadata

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  // Plant a blob carrying only the legacy "asv_permission" key.
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata = new HashMap<String, String>();
  metadata.put("asv_permission", getExpectedPermissionString("rw-------"));
  backingStore.setContent(AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[]{}, metadata);
  // The legacy key must still be honored on read.
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  // A write-path operation upgrades the metadata to the new "hdi_" key and
  // drops the legacy one.
  FsPermission meAndYou = new FsPermission(FsAction.READ_WRITE,
      FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata = backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"), storedPermission);
  assertNull(metadata.get("asv_permission"));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testFolderMetadata() throws Exception {
  Path folder = new Path("/folder");
  FsPermission justRead = new FsPermission(FsAction.READ, FsAction.READ,
      FsAction.READ);
  fs.mkdirs(folder, justRead);
  // The backing blob must be tagged as a folder and keep the permission.
  HashMap<String, String> metadata =
      backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
  assertNotNull(metadata);
  assertEquals("true", metadata.get("hdi_isfolder"));
  assertEquals(getExpectedPermissionString("r--r--r--"),
      metadata.get("hdi_permission"));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  // The permission must be recorded in the blob metadata...
  HashMap<String, String> metadata =
      backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  // ...and surface again through getFileStatus.
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test
public void testFirstContainerVersionMetadata() throws Exception {
  HashMap<String, String> containerMetadata = new HashMap<String, String>();
  containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,
      AzureNativeFileSystemStore.FIRST_WASB_VERSION);
  FsWithPreExistingContainer fsWithContainer =
      FsWithPreExistingContainer.create(containerMetadata);
  // Read-only operations leave the old version stamp alone.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata()
      .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  // The first write upgrades the stamp to the current key/version and
  // removes the legacy key.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata()
      .get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  fsWithContainer.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test
public void testPreExistingContainerVersionMetadata() throws Exception {
  FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer.create();
  // Read-only operations leave the container metadata untouched.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertNull(fsWithContainer.getContainerMetadata());
  // The first write stamps the current WASB version.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertNotNull(fsWithContainer.getContainerMetadata());
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  fsWithContainer.close();
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB stamped the version in the container metadata.
 */
@Test
public void testContainerVersionMetadata() throws Exception {
  // Any write operation stamps the current WASB version on the container.
  fs.createNewFile(new Path("/foo"));
  HashMap<String, String> containerMetadata = backingStore.getContainerMetadata();
  assertNotNull(containerMetadata);
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}

Class: org.apache.hadoop.fs.azure.TestContainerChecks

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testContainerCreateAfterDoesNotExist() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("",
      EnumSet.noneOf(CreateOptions.class));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();
  // No container yet: listing the root must throw, not return.
  assertFalse(container.exists());
  try {
    assertNull(fs.listStatus(new Path("/")));
    // FIX: use fail() instead of the assertTrue(message, false) anti-idiom.
    fail("Should've thrown.");
  } catch (FileNotFoundException ex) {
    assertTrue("Unexpected exception: " + ex,
        ex.getMessage().contains("does not exist."));
  }
  assertFalse(container.exists());
  // After an out-of-band create, writes against the container succeed.
  container.create();
  assertTrue(fs.createNewFile(new Path("/foo")));
  assertTrue(container.exists());
}

Class: org.apache.hadoop.fs.azure.TestNativeAzureFileSystemConcurrency

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testLinkBlobs() throws Exception {
  Path filePath = new Path("/inProgress");
  FSDataOutputStream outputStream = fs.create(filePath);
  // While the upload is open, the visible blob carries a link back to the
  // temporary upload blob.
  HashMap<String, String> metadata =
      backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNotNull(metadata);
  String linkValue = metadata.get(
      AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
  assertNotNull(linkValue);
  assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri(linkValue)));
  assertTrue(fs.exists(filePath));
  // Closing the stream finalizes the upload and removes the link key.
  outputStream.close();
  metadata = backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNull(metadata.get(
      AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY));
}

Class: org.apache.hadoop.fs.azure.TestNativeAzureFileSystemFileNameCheck

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test
public void testRename() throws Exception {
  Path testFile1 = new Path(root + "/testFile1");
  assertTrue(fs.createNewFile(testFile1));
  Path testFile2 = new Path(root + "/testFile2");
  // FIX: assert the rename result instead of silently ignoring it, to be
  // consistent with the method's other assertions.
  assertTrue(fs.rename(testFile1, testFile2));
  assertTrue(!fs.exists(testFile1) && fs.exists(testFile2));
  // Renaming onto an invalid (colon-bearing) name must throw and leave the
  // source intact.
  Path testFile3 = new Path(root + "/testFile3:3");
  try {
    fs.rename(testFile2, testFile3);
    fail("Should've thrown.");
  } catch (IOException e) {
    // expected: invalid destination name
  }
  assertTrue(fs.exists(testFile2));
}

UtilityVerifier BooleanVerifier HybridVerifier 
@Test
public void testCreate() throws Exception {
  // A plain name is accepted...
  Path testFile1 = new Path(root + "/testFile1");
  assertTrue(fs.createNewFile(testFile1));
  // ...a colon-bearing name must be rejected.
  Path testFile2 = new Path(root + "/testFile2:2");
  try {
    fs.createNewFile(testFile2);
    fail("Should've thrown.");
  } catch (IOException e) {
    // expected: invalid file name
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test
public void testMkdirs() throws Exception {
  Path testFolder1 = new Path(root + "/testFolder1");
  assertTrue(fs.mkdirs(testFolder1));
  // A colon-bearing name must be rejected with an IOException.
  Path testFolder2 = new Path(root + "/testFolder2:2");
  try {
    // FIX: the call was wrapped in assertTrue(), so a false return would
    // surface as a misleading AssertionError instead of the intended
    // "Should've thrown." failure; falling through to fail() covers both
    // the true and false return cases with the right message.
    fs.mkdirs(testFolder2);
    fail("Should've thrown.");
  } catch (IOException e) {
    // expected: invalid folder name
  }
}

Class: org.apache.hadoop.fs.azure.TestOutOfBandAzureBlobOperations

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testImplicitFolderListed() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  // Listing the blob path directly yields the file entry itself.
  FileStatus[] obtained = fs.listStatus(new Path("/root/b"));
  assertNotNull(obtained);
  assertEquals(1, obtained.length);
  assertFalse(obtained[0].isDirectory());
  assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
  // Listing the implicit parent folder yields the same file entry.
  obtained = fs.listStatus(new Path("/root"));
  assertNotNull(obtained);
  assertEquals(1, obtained.length);
  assertFalse(obtained[0].isDirectory());
  assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
  // The implicit parent itself reports as a directory.
  FileStatus dirStatus = fs.getFileStatus(new Path("/root"));
  assertNotNull(dirStatus);
  assertTrue(dirStatus.isDirectory());
  assertEquals("/root", dirStatus.getPath().toUri().getPath());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetOwnerOnImplicitFolder() throws Exception {
  // "/root" exists only implicitly, via the out-of-band blob below it.
  createEmptyBlobOutOfBand("root/b");
  fs.setOwner(new Path("/root"), "newOwner", null);
  FileStatus status = fs.getFileStatus(new Path("/root"));
  assertNotNull(status);
  assertEquals("newOwner", status.getOwner());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test
public void testSetPermissionOnImplicitFolder() throws Exception {
  // "/root" exists only implicitly, via the out-of-band blob below it.
  createEmptyBlobOutOfBand("root/b");
  FsPermission perm = new FsPermission((short) 0600);
  fs.setPermission(new Path("/root"), perm);
  FileStatus status = fs.getFileStatus(new Path("/root"));
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testFileAndImplicitFolderSameName() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  createEmptyBlobOutOfBand("root/b/c");
  // "root/b" resolves to the file, not the implicit folder of same name.
  FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
  assertEquals(1, listResult.length);
  assertFalse(listResult[0].isDirectory());
  // Deleting a child whose parent is also a file is unresolvable.
  try {
    fs.delete(new Path("/root/b/c"), true);
    // FIX: use fail() instead of the assertTrue(message, false) anti-idiom.
    fail("Should've thrown.");
  } catch (AzureException e) {
    assertEquals("File /root/b/c has a parent directory /root/b"
        + " which is also a file. Can't resolve.", e.getMessage());
  }
}

Class: org.apache.hadoop.fs.azure.TestWasbFsck

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that we delete dangling files properly
 */
@Test
public void testDelete() throws Exception {
  // Simulate a crash mid-upload: data flushed but the stream never closed.
  Path danglingFile = new Path("/crashedInTheMiddle");
  FSDataOutputStream stream = fs.create(danglingFile);
  stream.write(new byte[]{1, 2, 3});
  stream.flush();
  // The file is visible but reports length 0, and one temporary blob is
  // left behind.
  FileStatus fileStatus = fs.getFileStatus(danglingFile);
  assertNotNull(fileStatus);
  assertEquals(0, fileStatus.getLen());
  assertEquals(1, getNumTempBlobs());
  // fsck -delete must reap both the temp blob and the dangling file.
  runFsck("-delete");
  assertEquals(0, getNumTempBlobs());
  assertFalse(fs.exists(danglingFile));
}

Class: org.apache.hadoop.fs.azure.TestWasbUriAndConfiguration

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testConnectUsingSASReadonly() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
      CreateOptions.UseSas, CreateOptions.CreateContainer, CreateOptions.Readonly));
  assumeNotNull(testAccount);
  // Upload a 3-byte blob out-of-band (the SAS connection is read-only).
  final String blobKey = "blobForReadonly";
  CloudBlobContainer container = testAccount.getRealContainer();
  CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
  ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[]{1, 2, 3});
  blob.upload(inputStream, 3);
  inputStream.close();
  // The read-only file system must still see and read the blob.
  Path filePath = new Path("/" + blobKey);
  FileSystem fs = testAccount.getFileSystem();
  assertTrue(fs.exists(filePath));
  byte[] obtained = new byte[3];
  DataInputStream obtainedInputStream = fs.open(filePath);
  obtainedInputStream.readFully(obtained);
  obtainedInputStream.close();
  assertEquals(3, obtained[2]);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the cases when the URI is specified with no authority, i.e.
 * wasb:///path/to/file.
 */
@Test
public void testNoUriAuthority() throws Exception {
  // For every combination of default scheme and requested scheme, an
  // authority-less URI must borrow the authority from the default FS.
  String[] wasbAliases=new String[]{"wasb","wasbs"};
  for ( String defaultScheme : wasbAliases) {
    for ( String wantedScheme : wasbAliases) {
      testAccount=AzureBlobStorageTestAccount.createMock();
      Configuration conf=testAccount.getFileSystem().getConf();
      String authority=testAccount.getFileSystem().getUri().getAuthority();
      URI defaultUri=new URI(defaultScheme,authority,null,null,null);
      conf.set("fs.default.name",defaultUri.toString());
      URI wantedUri=new URI(wantedScheme + ":///random/path");
      NativeAzureFileSystem obtained=(NativeAzureFileSystem)FileSystem.get(wantedUri,conf);
      assertNotNull(obtained);
      // The resolved FS carries the wanted scheme plus the default
      // authority...
      assertEquals(new URI(wantedScheme,authority,null,null,null),obtained.getUri());
      // ...and qualifying a path keeps that scheme/authority pair.
      Path qualified=obtained.makeQualified(new Path(wantedUri));
      assertEquals(new URI(wantedScheme,authority,wantedUri.getPath(),null,null),qualified.toUri());
      // Reset account and FS cache between combinations so each iteration
      // starts clean.
      testAccount.cleanup();
      FileSystem.closeAll();
    }
  }
  // With a non-wasb default FS there is no authority to borrow, so an
  // authority-less wasb URI must be rejected.
  testAccount=AzureBlobStorageTestAccount.createMock();
  Configuration conf=testAccount.getFileSystem().getConf();
  conf.set("fs.default.name","file:///");
  try {
    FileSystem.get(new URI("wasb:///random/path"),conf);
    fail("Should've thrown.");
  } catch ( IllegalArgumentException e) {
    // expected
  }
}

Class: org.apache.hadoop.fs.azure.metrics.TestAzureFileSystemInstrumentation

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testMetricsOnMkdirList() throws Exception {
  long base = getBaseWebResponses();
  // mkdir issues a bounded number of web requests and bumps the
  // directories-created counter exactly once.
  assertTrue(fs.mkdirs(new Path("a")));
  base = assertWebResponsesInRange(base, 1, 12);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_DIRECTORIES_CREATED));
  // Listing the root takes exactly one web request.
  assertEquals(1, fs.listStatus(new Path("/")).length);
  base = assertWebResponsesEquals(base, 1);
  assertNoErrors();
}

BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testClientErrorMetrics() throws Exception {
  String directoryName = "metricsTestDirectory_ClientError";
  Path directoryPath = new Path("/" + directoryName);
  assertTrue(fs.mkdirs(directoryPath));
  // Hold a lease so the delete below is rejected as a client-side error.
  String leaseID = testAccount.acquireShortLease(directoryName);
  try {
    try {
      fs.delete(directoryPath, true);
      // FIX: use fail() instead of the assertTrue(message, false) anti-idiom.
      fail("Should've thrown.");
    } catch (AzureException ex) {
      assertTrue("Unexpected exception: " + ex,
          ex.getMessage().contains("lease"));
    }
    // The rejection must count as a client error, not a server error.
    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
        getInstrumentation(), WASB_CLIENT_ERRORS));
    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(
        getInstrumentation(), WASB_SERVER_ERRORS));
  } finally {
    testAccount.releaseLease(leaseID, directoryName);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testMetricsOnFileRename() throws Exception {
  long base = getBaseWebResponses();
  Path originalPath = new Path("/metricsTest_RenameStart");
  Path destinationPath = new Path("/metricsTest_RenameFinal");
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_CREATED));
  // Creating the empty file bumps the files-created counter once and stays
  // within a bounded number of web responses.
  assertTrue(fs.createNewFile(originalPath));
  logOpResponseCount("Creating an empty file", base);
  base = assertWebResponsesInRange(base, 2, 20);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_CREATED));
  // Renaming likewise stays within a bounded number of web responses.
  assertTrue(fs.rename(originalPath, destinationPath));
  logOpResponseCount("Renaming a file", base);
  base = assertWebResponsesInRange(base, 2, 15);
  assertNoErrors();
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testMetricsOnFileCreateRead() throws Exception { long base=getBaseWebResponses(); assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation())); Path filePath=new Path("/metricsTest_webResponses"); final int FILE_SIZE=1000; getBandwidthGaugeUpdater().suppressAutoUpdate(); Date start=new Date(); OutputStream outputStream=fs.create(filePath); outputStream.write(nonZeroByteArray(FILE_SIZE)); outputStream.close(); long uploadDurationMs=new Date().getTime() - start.getTime(); logOpResponseCount("Creating a 1K file",base); base=assertWebResponsesInRange(base,2,15); getBandwidthGaugeUpdater().triggerUpdate(true); long bytesWritten=AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()); assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2)); long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation()); assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2)); long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE); System.out.println("Upload rate: " + uploadRate + " bytes/second."); long expectedRate=(FILE_SIZE * 1000L) / uploadDurationMs; assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. 
This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate); long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY); System.out.println("Upload latency: " + uploadLatency); long expectedLatency=uploadDurationMs; assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0); assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency); start=new Date(); InputStream inputStream=fs.open(filePath); int count=0; while (inputStream.read() >= 0) { count++; } inputStream.close(); long downloadDurationMs=new Date().getTime() - start.getTime(); assertEquals(FILE_SIZE,count); logOpResponseCount("Reading a 1K file",base); base=assertWebResponsesInRange(base,1,10); getBandwidthGaugeUpdater().triggerUpdate(false); long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation()); assertEquals(FILE_SIZE,totalBytesRead); long bytesRead=AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation()); assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2)); long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE); System.out.println("Download rate: " + downloadRate + " bytes/second."); expectedRate=(FILE_SIZE * 1000L) / downloadDurationMs; assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second 
that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate); long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY); System.out.println("Download latency: " + downloadLatency); expectedLatency=downloadDurationMs; assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0); assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency); assertNoErrors(); }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a 100 MB file and reads it back, checking the total-bytes
 * counters and that the rate/latency gauges are populated.
 */
@Test
public void testMetricsOnBigFileCreateRead() throws Exception {
  long webResponses = getBaseWebResponses();

  assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));

  Path filePath = new Path("/metricsTest_webResponses");
  final int FILE_SIZE = 100 * 1024 * 1024;

  // Disable automatic gauge updates so only explicit triggers change them.
  getBandwidthGaugeUpdater().suppressAutoUpdate();

  // Upload 100 MB of zeros.
  OutputStream outputStream = fs.create(filePath);
  outputStream.write(new byte[FILE_SIZE]);
  outputStream.close();

  logOpResponseCount("Creating a 100 MB file", webResponses);
  webResponses = assertWebResponsesInRange(webResponses, 20, 50);
  getBandwidthGaugeUpdater().triggerUpdate(true);

  long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
  assertTrue("The total bytes written " + totalBytesWritten
      + " is pretty far from the expected range of around " + FILE_SIZE
      + " bytes plus a little overhead.",
      totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));

  long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
  System.out.println("Upload rate: " + uploadRate + " bytes/second.");
  long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_LATENCY);
  System.out.println("Upload latency: " + uploadLatency);
  assertTrue("The upload latency " + uploadLatency
      + " should be greater than zero now that I've just uploaded a file.",
      uploadLatency > 0);

  // Read every byte back to drive the download metrics.
  InputStream inputStream = fs.open(filePath);
  int count = 0;
  while (inputStream.read() >= 0) {
    count++;
  }
  inputStream.close();
  assertEquals(FILE_SIZE, count);

  logOpResponseCount("Reading a 100 MB file", webResponses);
  webResponses = assertWebResponsesInRange(webResponses, 20, 40);
  getBandwidthGaugeUpdater().triggerUpdate(false);

  long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
  assertEquals(FILE_SIZE, totalBytesRead);

  long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
  System.out.println("Download rate: " + downloadRate + " bytes/second.");
  long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_LATENCY);
  System.out.println("Download latency: " + downloadLatency);
  assertTrue("The download latency " + downloadLatency
      + " should be greater than zero now that I've just downloaded a file.",
      downloadLatency > 0);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the request counts for exists() on missing/present files and
 * that delete() bumps the files-deleted counter.
 */
@Test
public void testMetricsOnFileExistsDelete() throws Exception {
  long webResponses = getBaseWebResponses();

  Path filePath = new Path("/metricsTest_delete");

  // Probing a missing file should take 1-3 requests.
  assertFalse(fs.exists(filePath));
  logOpResponseCount("Checking file existence for non-existent file", webResponses);
  webResponses = assertWebResponsesInRange(webResponses, 1, 3);

  // Create it, then re-probe: 1-2 requests for an existing file.
  assertTrue(fs.createNewFile(filePath));
  webResponses = getCurrentWebResponses();
  assertTrue(fs.exists(filePath));
  logOpResponseCount("Checking file existence for existent file", webResponses);
  webResponses = assertWebResponsesInRange(webResponses, 1, 2);

  // Deleting takes 1-4 requests and bumps the deleted counter 0 -> 1.
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_DELETED));
  assertTrue(fs.delete(filePath, false));
  logOpResponseCount("Deleting a file", webResponses);
  webResponses = assertWebResponsesInRange(webResponses, 1, 4);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_DELETED));

  assertNoErrors();
}

Class: org.apache.hadoop.fs.azure.metrics.TestBandwidthGaugeUpdater

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises BandwidthGaugeUpdater directly (no filesystem): uploads are
 * reflected in the bytes-written gauge, and a 10-second-old upload is
 * averaged over its duration.
 */
@Test
public void testSingleThreaded() throws Exception {
  AzureFileSystemInstrumentation instrumentation =
      new AzureFileSystemInstrumentation(new Configuration());
  BandwidthGaugeUpdater updater =
      new BandwidthGaugeUpdater(instrumentation, 1000, true);

  // No traffic yet: the gauge reads zero.
  updater.triggerUpdate(true);
  assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));

  // An instantaneous 150-byte upload is reported in full.
  updater.blockUploaded(new Date(), new Date(), 150);
  updater.triggerUpdate(true);
  assertEquals(150, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));

  // A 200-byte upload spread over ~10 s should average to ~20 bytes/s.
  updater.blockUploaded(new Date(new Date().getTime() - 10000), new Date(), 200);
  updater.triggerUpdate(true);
  long currentBytes = AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation);
  assertTrue(
      "We expect around (200/10 = 20) bytes written as the gauge value."
          + "Got " + currentBytes,
      currentBytes > 18 && currentBytes < 22);

  updater.close();
}

BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Verifies that WASB background updater threads are cleaned up by
 * finalization once their filesystems become unreachable.
 */
@Test
public void testFinalizerThreadShutdown() throws Exception {
  // Start from a clean slate: force finalization of anything left over.
  System.gc();
  System.runFinalization();
  int nUpdaterThreadsStart = getWasbThreadCount();
  // assertEquals reports the actual thread count on failure, unlike the
  // original assertTrue(n == 0).
  assertEquals("Existing WASB threads have not been cleared",
      0, nUpdaterThreadsStart);

  // Spawn ten filesystems, each of which starts a background thread.
  final int nFilesystemsToSpawn = 10;
  AzureBlobStorageTestAccount testAccount = null;
  for (int i = 0; i < nFilesystemsToSpawn; i++) {
    testAccount = AzureBlobStorageTestAccount.createMock();
    testAccount.getFileSystem();
  }
  int nUpdaterThreadsAfterSpawn = getWasbThreadCount();
  // An assumption, not an assertion: if the threads never spawned the
  // rest of the test is meaningless rather than failed.
  Assume.assumeTrue("Background threads should have spawned.",
      nUpdaterThreadsAfterSpawn == 10);

  // Drop the last strong reference and force finalization again.
  testAccount = null;
  System.gc();
  System.runFinalization();
  int nUpdaterThreadsAfterCleanup = getWasbThreadCount();
  assertEquals("Finalizers should have reduced the thread count. ",
      0, nUpdaterThreadsAfterCleanup);
}

Class: org.apache.hadoop.fs.contract.AbstractContractRenameTest

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Rename test - handles filesystems that will overwrite the destination
 * as well as those that do not (i.e. HDFS).
 * @throws Throwable
 */
@Test public void testRenameFileOverExistingFile() throws Throwable {
  describe("Verify renaming a file onto an existing file matches expectations");
  // Source: 256 bytes of lowercase data.
  Path srcFile=path("source-256.txt");
  byte[] srcData=dataset(256,'a','z');
  writeDataset(getFileSystem(),srcFile,srcData,srcData.length,1024,false);
  // Destination: 512 bytes of uppercase data, created before the rename.
  Path destFile=path("dest-512.txt");
  byte[] destData=dataset(512,'A','Z');
  writeDataset(getFileSystem(),destFile,destData,destData.length,1024,false);
  assertIsFile(destFile);
  // Contract options describing this filesystem's rename-onto-existing
  // behavior.
  boolean renameOverwritesDest=isSupported(RENAME_OVERWRITES_DEST);
  // NOTE(review): despite its name, this flag is the NEGATION of the
  // RENAME_RETURNS_FALSE_IF_DEST_EXISTS option -- confirm against the
  // contract option definitions before relying on the name.
  boolean renameReturnsFalseOnRenameDestExists=!isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
  boolean destUnchanged=true;
  try {
    boolean renamed=rename(srcFile,destFile);
    if (renameOverwritesDest) {
      // Overwriting filesystems must report success; dest now holds
      // the source data.
      assertTrue("Rename returned false",renamed);
      destUnchanged=false;
    } else {
      // Non-overwriting filesystems must either return false or throw.
      if (renamed && !renameReturnsFalseOnRenameDestExists) {
        String destDirLS=generateAndLogErrorListing(srcFile,destFile);
        getLog().error("dest dir {}",destDirLS);
        fail("expected rename(" + srcFile + ", "+ destFile+ " ) to fail,"+ " but got success and destination of "+ destDirLS);
      }
    }
  } catch ( FileAlreadyExistsException e) {
    // Throwing instead of returning false is also acceptable.
    handleExpectedException(e);
  }
  // The destination contents depend on whether the rename overwrote it.
  ContractTestUtils.verifyFileContents(getFileSystem(),destFile,destUnchanged ? destData : srcData);
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Rename a missing source file: depending on the contract options the
 * filesystem either returns false or throws FileNotFoundException; in
 * both cases no destination file may be created.
 */
@Test public void testRenameNonexistentFile() throws Throwable {
  describe("rename a file into a new file in the same directory");
  Path missing=path("testRenameNonexistentFileSrc");
  Path target=path("testRenameNonexistentFileDest");
  // Does this filesystem signal a missing source with false (vs throwing)?
  boolean renameReturnsFalseOnFailure=isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
  mkdirs(missing.getParent());
  try {
    boolean renamed=rename(missing,target);
    if (!renameReturnsFalseOnFailure) {
      // This filesystem promised to throw; returning normally is a failure.
      String destDirLS=generateAndLogErrorListing(missing,target);
      fail("expected rename(" + missing + ", "+ target+ " ) to fail,"+ " got a result of "+ renamed+ " and a destination directory of "+ destDirLS);
    } else {
      // Returning is fine, but the result must be false.
      getLog().warn("Rename returned {} renaming a nonexistent file",renamed);
      assertFalse("Renaming a missing file returned true",renamed);
    }
  } catch ( FileNotFoundException e) {
    if (renameReturnsFalseOnFailure) {
      // Declared as returning false, so an exception breaks the contract.
      ContractTestUtils.fail("Renaming a missing file unexpectedly threw an exception",e);
    }
    handleExpectedException(e);
  } catch ( IOException e) {
    // Some filesystems throw a broader IOException; accept leniently.
    handleRelaxedException("rename nonexistent file","FileNotFoundException",e);
  }
  // Regardless of signalling style, nothing may appear at the destination.
  assertPathDoesNotExist("rename nonexistent file created a destination file",target);
}

Class: org.apache.hadoop.fs.contract.AbstractContractSeekTest

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reading the final two bytes of a file must succeed; reading beyond
 * them must return -1 rather than throwing.
 */
@Test
public void testSeekAndReadPastEndOfFile() throws Throwable {
  describe("verify that reading past the last bytes in the file returns -1");
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte.
  instream.seek(TEST_FILE_LEN - 2);
  // Two reads of real data remain...
  assertTrue("Premature EOF", instream.read() != -1);
  assertTrue("Premature EOF", instream.read() != -1);
  // ...and the next read must signal EOF with -1.
  assertMinusOne("read past end of file", instream.read());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A positioned read(position, buffer, ...) must not move the stream's
 * sequential position.
 */
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  describe("verify that a positioned read does not change the getPos() value");
  Path testSeekFile=path("bigseekfile.txt");
  // 64 KB of data cycling through byte values 0..255.
  byte[] block=dataset(65536,0,255);
  createFile(getFileSystem(),testSeekFile,false,block);
  instream=getFileSystem().open(testSeekFile);
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  // The sequential read above advanced the stream to offset 40000.
  assertEquals(40000,instream.getPos());
  byte[] readBuffer=new byte[256];
  // Positioned read at offset 128 must not move the stream position.
  // NOTE(review): the return value (bytes actually read) is ignored; a
  // short read would only surface via the content checks below -- confirm
  // this is intended.
  instream.read(128,readBuffer,0,readBuffer.length);
  assertEquals(40000,instream.getPos());
  // Sequential reading resumes at 40000 as if the positioned read never
  // happened.
  assertEquals("@40000",block[40000],(byte)instream.read());
  for (int i=0; i < 256; i++) {
    assertEquals("@" + i,block[i + 128],readBuffer[i]);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * seek(-1) must raise EOFException (or, leniently, IOException) and
 * leave the stream position at zero.
 */
@Test
public void testNegativeSeek() throws Throwable {
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  try {
    instream.seek(-1);
    // The seek "succeeded": gather evidence before failing.
    long p = instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + p);
    int result = instream.read();
    fail("expected an exception, got data " + result + " at a position of "+ p);
  } catch (EOFException e) {
    // The expected outcome.
    handleExpectedException(e);
  } catch (IOException e) {
    // Accept a broader IOException from lenient filesystems.
    handleRelaxedException("a negative seek", "EOFException", e);
  }
  assertEquals(0, instream.getPos());
}

Class: org.apache.hadoop.fs.contract.AbstractFSContractTestBase

TestInitializer InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Setup: create the contract then init it
 * @throws Exception on any failure
 */
@Before public void setup() throws Exception {
  // Build and initialise the per-filesystem contract under test.
  contract=createContract(createConfiguration());
  contract.init();
  // Skip the whole suite if this contract is disabled in configuration.
  assumeEnabled();
  fileSystem=contract.getTestFileSystem();
  assertNotNull("null filesystem",fileSystem);
  URI fsURI=fileSystem.getUri();
  LOG.info("Test filesystem = {} implemented by {}",fsURI,fileSystem);
  // Guard against the contract handing back a filesystem of the wrong
  // scheme.
  assertEquals("wrong filesystem of " + fsURI,contract.getScheme(),fsURI.getScheme());
  // All test artifacts live under the contract's designated test path.
  testPath=getContract().getTestPath();
  mkdirs(testPath);
}

Class: org.apache.hadoop.fs.contract.localfs.TestLocalFSContractLoaded

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Sanity-check that the localfs contract resource was loaded: the
 * atomic-rename option must be both present and true.
 */
@Test
public void testContractWorks() throws Throwable {
  String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME);
  assertNotNull("not set: " + key, getContract().getConf().get(key));
  assertTrue("not true: " + key,
      getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false));
}

Class: org.apache.hadoop.fs.http.server.TestHttpFSServer

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
 * <ol>
 *   <li>Initial status with no ACLs</li>
 *   <li>The addition of a default ACL</li>
 *   <li>The removal of default ACLs</li>
 * </ol>
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testDirAcls() throws Exception {
  final String defUser1="default:user:glarch:r-x";
  final String defSpec1="aclspec=" + defUser1;
  final String dir="/aclDirTest";
  String statusJson;
  List aclEntries;
  createHttpFSServer(false);
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  // Step 1: a fresh directory has no aclBit flag and an empty ACL list.
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);
  // Step 2: set a default ACL; aclBit appears and the spec expands to
  // five entries (the explicit entry plus implied default entries).
  putCmd(dir,"SETACL",defSpec1);
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 5);
  Assert.assertTrue(aclEntries.contains(defUser1));
  // Step 3: remove the default ACLs; back to no aclBit, empty list.
  putCmd(dir,"REMOVEDEFAULTACL",null);
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the "instrumentation" operation over HTTP: anonymous access
 * is rejected, a configured Hadoop user gets the counters JSON, and the
 * operation is only valid against the root path.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void instrumentation() throws Exception {
  createHttpFSServer(false);

  // Unknown user -> 401. (Arguments reordered: JUnit expects the
  // expected value first so failure messages read correctly.)
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());

  // A real Hadoop user -> 200, with a JSON body containing counters.
  url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
          HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  String line = reader.readLine();
  reader.close();
  Assert.assertTrue(line.contains("\"counters\":{"));

  // The operation is rejected on non-root paths -> 400.
  url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
          HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Validate the various ACL set/modify/remove calls. General strategy is
 * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
 * and GETACLSTATUS:
 * <ol>
 *   <li>Create a file with no ACLs</li>
 *   <li>Add a user + group ACL</li>
 *   <li>Add another user ACL</li>
 *   <li>Remove the first user ACL</li>
 *   <li>Remove all ACLs</li>
 * </ol>
 */
@Test @TestDir @TestJetty @TestHdfs public void testFileAcls() throws Exception {
  final String aclUser1="user:foo:rw-";
  final String aclUser2="user:bar:r--";
  final String aclGroup1="group::r--";
  final String aclSpec="aclspec=user::rwx," + aclUser1 + ","+ aclGroup1+ ",other::---";
  final String modAclSpec="aclspec=" + aclUser2;
  final String remAclSpec="aclspec=" + aclUser1;
  final String dir="/aclFileTest";
  final String path=dir + "/test";
  String statusJson;
  List aclEntries;
  createHttpFSServer(false);
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  // Step 1: a fresh file has no aclBit and an empty ACL list.
  createWithHttp(path,null);
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);
  // Step 2: SETACL with a user + group spec; aclBit appears in both
  // GETFILESTATUS and LISTSTATUS, and the named entries are listed.
  putCmd(path,"SETACL",aclSpec);
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 2);
  Assert.assertTrue(aclEntries.contains(aclUser1));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // Step 3: MODIFYACLENTRIES adds a second user entry.
  putCmd(path,"MODIFYACLENTRIES",modAclSpec);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 3);
  Assert.assertTrue(aclEntries.contains(aclUser1));
  Assert.assertTrue(aclEntries.contains(aclUser2));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // Step 4: REMOVEACLENTRIES drops the first user entry only.
  putCmd(path,"REMOVEACLENTRIES",remAclSpec);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 2);
  Assert.assertTrue(aclEntries.contains(aclUser2));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // Step 5: REMOVEACL clears everything; aclBit disappears again.
  putCmd(path,"REMOVEACL",null);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
}

Class: org.apache.hadoop.fs.permission.TestAcl

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * equals() contract for the AclEntry fixtures: ENTRY1/ENTRY2 are equal
 * but distinct instances; ENTRY3 and ENTRY4 differ from both and from
 * each other. The explicit .equals(...) calls (rather than
 * assertEquals/assertNotEquals) are deliberate so the null and
 * foreign-type branches of equals() itself are exercised.
 */
@Test public void testEntryEquals(){
  // Equal values must still be distinct object instances.
  assertNotSame(ENTRY1,ENTRY2);
  assertNotSame(ENTRY1,ENTRY3);
  assertNotSame(ENTRY1,ENTRY4);
  assertNotSame(ENTRY2,ENTRY3);
  assertNotSame(ENTRY2,ENTRY4);
  assertNotSame(ENTRY3,ENTRY4);
  // Reflexive and symmetric equality.
  assertEquals(ENTRY1,ENTRY1);
  assertEquals(ENTRY2,ENTRY2);
  assertEquals(ENTRY1,ENTRY2);
  assertEquals(ENTRY2,ENTRY1);
  // Inequality, plus the null and foreign-type cases.
  assertFalse(ENTRY1.equals(ENTRY3));
  assertFalse(ENTRY1.equals(ENTRY4));
  assertFalse(ENTRY3.equals(ENTRY4));
  assertFalse(ENTRY1.equals(null));
  assertFalse(ENTRY1.equals(new Object()));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * hashCode() contract for the AclStatus fixtures: equal statuses must
 * share a hash; the unequal STATUS3 is expected to differ.
 */
@Test public void testStatusHashCode(){
  assertEquals(STATUS1.hashCode(),STATUS2.hashCode());
  // Distinct hashes are not guaranteed by the hashCode() contract in
  // general, but are expected for these particular fixture values.
  assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * equals() contract for the AclStatus fixtures: STATUS1/STATUS2 are
 * equal but distinct instances; STATUS3 differs. The explicit
 * .equals(...) calls exercise the null and foreign-type branches of
 * equals() itself.
 */
@Test public void testStatusEquals(){
  // Equal values, distinct instances.
  assertNotSame(STATUS1,STATUS2);
  assertNotSame(STATUS1,STATUS3);
  assertNotSame(STATUS2,STATUS3);
  // Reflexive and symmetric equality.
  assertEquals(STATUS1,STATUS1);
  assertEquals(STATUS2,STATUS2);
  assertEquals(STATUS1,STATUS2);
  assertEquals(STATUS2,STATUS1);
  // Inequality, plus the null and foreign-type cases.
  assertFalse(STATUS1.equals(STATUS3));
  assertFalse(STATUS2.equals(STATUS3));
  assertFalse(STATUS1.equals(null));
  assertFalse(STATUS1.equals(new Object()));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * hashCode() contract for the AclEntry fixtures: equal entries must
 * share a hash; the unequal fixtures are expected to differ.
 */
@Test public void testEntryHashCode(){
  assertEquals(ENTRY1.hashCode(),ENTRY2.hashCode());
  // Distinct hashes are not guaranteed by the contract in general, but
  // are expected for these particular fixture values.
  assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
  assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
  assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}

Class: org.apache.hadoop.fs.shell.TestCommandFactory

TestInitializer NullVerifier HybridVerifier 
/**
 * Builds a fresh CommandFactory before each test and sanity-checks the
 * construction.
 */
@Before
public void testSetup() {
  factory = new CommandFactory(conf);
  assertNotNull(factory);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Registers the test commands and verifies lookup by name: unknown names
 * yield null, registered names yield correctly-typed instances that know
 * their own command name. (The repeated four-assertion lookup pattern is
 * extracted into a helper.)
 */
@Test
public void testGetInstances() {
  factory.registerCommands(TestRegistrar.class);

  // Unknown command names resolve to null.
  assertNull(factory.getInstance("blarg"));

  // Names added via registerCommands().
  assertNamedInstance("tc1", TestCommand1.class);
  assertNamedInstance("tc2", TestCommand2.class);
  // Two names may map to the same implementation class.
  assertNamedInstance("tc2.1", TestCommand2.class);

  // Names added directly via addClass().
  factory.addClass(TestCommand4.class, "tc4");
  Command instance = assertNamedInstance("tc4", TestCommand4.class);
  // tc4 also exposes usage and description metadata.
  String usage = instance.getUsage();
  assertEquals("-tc4 tc4_usage", usage);
  assertEquals("tc4_description", instance.getDescription());
}

/**
 * Looks up {@code name} in the factory and asserts the instance exists,
 * has the expected class, and reports {@code name} as its command name.
 * @return the resolved instance for further assertions
 */
private Command assertNamedInstance(String name,
    Class<? extends Command> expectedClass) {
  Command instance = factory.getInstance(name);
  assertNotNull(instance);
  assertEquals(expectedClass, instance.getClass());
  assertEquals(name, instance.getCommandName());
  return instance;
}

Class: org.apache.hadoop.fs.slive.TestSlive

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips DataWriter output through DataVerifier: the requested
 * number of bytes must be written, read back, and verify with no
 * differing chunks.
 */
@Test
public void testDataWriting() throws Exception {
  long byteAm = 100;
  File fn = getTestFile();
  DataWriter writer = new DataWriter(rnd);

  // Write a 100-byte segment to the test file.
  FileOutputStream fs = new FileOutputStream(fn);
  GenerateOutput ostat = writer.writeSegment(byteAm, fs);
  LOG.info(ostat);
  fs.close();
  // assertEquals replaces assertTrue(x == y) so mismatches report both
  // values instead of just "false".
  assertEquals(byteAm, ostat.getBytesWritten());

  // Read it back and verify.
  DataVerifier vf = new DataVerifier();
  FileInputStream fin = new FileInputStream(fn);
  VerifyOutput vfout = vf.verifyFile(byteAm, new DataInputStream(fin));
  LOG.info(vfout);
  fin.close();
  // Expected value first, per the JUnit assertEquals convention
  // (the original had the arguments reversed).
  assertEquals(byteAm, vfout.getBytesRead());
  assertEquals(0, vfout.getChunksDifferent());
}

Class: org.apache.hadoop.fs.swift.TestReadPastBuffer

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Seek past the buffer then read
 * @throws Throwable problems
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte; two reads of real data remain.
  instream.seek(SEEK_FILE_LEN - 2);
  assertTrue("Premature EOF", instream.read() != -1);
  assertTrue("Premature EOF", instream.read() != -1);
  // The byte after the end must read as -1.
  assertMinusOne("read past end of file", instream.read());
}

Class: org.apache.hadoop.fs.swift.TestSeek

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A positioned read(position, buffer, ...) must not move the stream's
 * sequential position.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  Path testSeekFile=new Path(testPath,"bigseekfile.txt");
  // 64 KB dataset cycling through byte values 0..255.
  byte[] block=SwiftTestUtils.dataset(65536,0,255);
  createFile(testSeekFile,block);
  instream=fs.open(testSeekFile);
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  // The sequential read advanced the position to 40000.
  assertEquals(40000,instream.getPos());
  byte[] readBuffer=new byte[256];
  // NOTE(review): the number of bytes actually read is ignored; a short
  // read would only surface via the content checks below.
  instream.read(128,readBuffer,0,readBuffer.length);
  assertEquals(40000,instream.getPos());
  // Sequential reading resumes at 40000, untouched by the positioned
  // read.
  assertEquals("@40000",block[40000],(byte)instream.read());
  for (int i=0; i < 256; i++) {
    assertEquals("@" + i,block[i + 128],readBuffer[i]);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reading the final two bytes must succeed; the next read must signal
 * EOF with -1.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream=fs.open(smallSeekFile);
  assertEquals(0,instream.getPos());
  // Position on the second-to-last byte.
  instream.seek(SMALL_SEEK_FILE_LEN - 2);
  assertTrue("Premature EOF",instream.read() != -1);
  assertTrue("Premature EOF",instream.read() != -1);
  assertMinusOne("read past end of file",instream.read());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * seek(-1) must fail with an IOException and leave the position at zero.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable {
  instream=fs.open(smallSeekFile);
  assertEquals(0,instream.getPos());
  try {
    instream.seek(-1);
    // The seek did not throw: log the evidence, then attempt a read so
    // the fail() message can show what came back.
    long p=instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + p);
    int result=instream.read();
    fail("expected an exception, got data " + result + " at a position of "+ p);
  } catch ( IOException e) {
    // Expected: a negative seek is invalid. The exception is
    // intentionally ignored -- catching it IS the passing path.
  }
  assertEquals(0,instream.getPos());
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemBlockLocation

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Asking for block locations with a length that overshoots the file is
 * legal as long as the start offset is valid.
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateOutOfRangeLen() throws Throwable {
  describe("overshooting the length is legal, as long as the"
      + " origin location is valid");
  BlockLocation[] locations = getFs().getFileBlockLocations(
      createFileAndGetStatus(), 0, data.length + 100);
  assertNotNull(locations);
  assertTrue(locations.length > 0);
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemDirectories

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * test that a dir off root has a listStatus() call that
 * works as expected. and that when a child is added. it changes
 * @throws Exception on failures
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
  Path test = path("/test");
  // Start from a clean, empty /test directory.
  fs.delete(test, true);
  mkdirs(test);
  assertExists("created test directory", test);
  FileStatus[] statuses = fs.listStatus(test);
  String statusString = statusToString(test.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + statusString, 0,
      statuses.length);

  // Adding a child must show up in a fresh listing.
  Path src = path("/test/file");
  SwiftTestUtils.touch(fs, src);
  statuses = fs.listStatus(test);
  statusString = statusToString(test.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + statusString, 1,
      statuses.length);

  // NOTE(review): the single child entry is asserted to report
  // isDir() == true -- confirm against Swift pseudo-directory semantics.
  SwiftFileStatus stat = (SwiftFileStatus) statuses[0];
  assertTrue("isDir(): Not a directory: " + stat, stat.isDir());
  extraStatusAssertions(stat);
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemExtendedContract

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Assert that a filesystem is case sensitive.
 * This is done by creating a mixed-case filename and asserting that
 * its lower case version is not there.
 * @throws Exception failures
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testFilesystemIsCaseSensitive() throws Exception {
  String mixedCaseFilename = "/test/UPPER.TXT";
  Path upper = path(mixedCaseFilename);
  Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
  // Neither variant exists yet.
  assertFalse("File exists" + upper, fs.exists(upper));
  assertFalse("File exists" + lower, fs.exists(lower));

  // Create only the upper-case file.
  FSDataOutputStream out = fs.create(upper);
  out.writeUTF("UPPER");
  out.close();
  FileStatus upperStatus = fs.getFileStatus(upper);
  assertExists("Original upper case file" + upper, upper);
  // Case sensitivity: the lower-case twin must not appear.
  assertPathDoesNotExist("lower case file", lower);

  // Creating the lower-case file must not disturb the upper-case one.
  out = fs.create(lower);
  out.writeUTF("l");
  out.close();
  assertExists("lower case file", lower);
  assertExists("Original upper case file " + upper, upper);
  FileStatus newStatus = fs.getFileStatus(upper);
  assertEquals("Expected status:" + upperStatus + " actual status " + newStatus,
      upperStatus.getLen(), newStatus.getLen());
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemPartitionedUploads

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * tests functionality for big files ( &gt; 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {
  final Path path = new Path("/test/testFilePartUpload");
  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1,
      BLOCK_SIZE);
  try {
    // (An unused "totalPartitionsToWrite" local was removed from the
    // original.)
    assertPartitionsWritten("Startup", out, 0);
    // Write the first 2 KB, then verify the implied partition count.
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    long expected = getExpectedPartitionsWritten(firstWriteLen,
        PART_SIZE_BYTES, false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
        expected);
    assertPartitionsWritten("First write completed", out, expected);
    // Write the remainder of the dataset.
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
    out.write(src, firstWriteLen, remainder);
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    // Closing flushes the final (possibly short) partition.
    out.close();
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);
    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }
    // Read everything back and compare with the source dataset.
    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;
    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    // Block locations must be available for the uploaded object.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
        locations.length > 0);
    // A wrong reported length is treated as a known limitation (skipped
    // assumption) rather than a hard failure.
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      throw new AssumptionViolatedException(e, null);
    }
  } finally {
    IOUtils.closeStream(out);
  }
}

APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Same partitioned-upload exercise as testFilePartUpload, but without
 * the final check on the reported path length. (Javadoc corrected: the
 * original comment was copy-pasted from the length-checking variant.)
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException,
    URISyntaxException {
  final Path path = new Path("/test/testFilePartUploadLengthCheck");
  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1,
      BLOCK_SIZE);
  try {
    // (An unused "totalPartitionsToWrite" local was removed from the
    // original.)
    assertPartitionsWritten("Startup", out, 0);
    // Write the first 2 KB, then verify the implied partition count.
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    long expected = getExpectedPartitionsWritten(firstWriteLen,
        PART_SIZE_BYTES, false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
        expected);
    assertPartitionsWritten("First write completed", out, expected);
    // Write the remainder of the dataset.
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
    out.write(src, firstWriteLen, remainder);
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    // Closing flushes the final (possibly short) partition.
    out.close();
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);
    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }
    // Read everything back and compare with the source dataset.
    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    SwiftTestUtils.compareByteArrays(src, dest, len);
    // Block locations must be available for the uploaded object.
    FileStatus status = fs.getFileStatus(path);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
        locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemRename

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Rename a file to a new directory and verify the contents survive the move. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
  assumeRenameSupported();
  final Path old = new Path("/test/alice/file");
  final Path newPath = new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  // create the source file with known content
  final FSDataOutputStream fsDataOutputStream = fs.create(old);
  final byte[] message = "Some data".getBytes();
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();
  assertTrue(fs.exists(old));
  // rename expecting: rename succeeds, source gone, destination present
  rename(old, newPath, true, false, true);
  // read back from the new location and compare payloads
  final FSDataInputStream bobStream = fs.open(newPath);
  final byte[] bytes = new byte[512];
  final int read = bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer = new byte[read];
  System.arraycopy(bytes, 0, buffer, 0, read);
  assertEquals(new String(message), new String(buffer));
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Rename a file into an existing directory; the file should end up
 * inside the destination directory under its original name.
 * @throws Exception
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFileIntoExistingDirectory() throws Exception {
  assumeRenameSupported();
  Path src = path("/test/olddir/file");
  createFile(src);
  Path dst = path("/test/new/newdir");
  fs.mkdirs(dst);
  // rename expecting success; file moves under dst
  rename(src, dst, true, false, true);
  Path newFile = path("/test/new/newdir/file");
  if (!fs.exists(newFile)) {
    // dump directory listings to aid debugging before failing
    String ls = ls(dst);
    LOG.info(ls(path("/test/new")));
    LOG.info(ls(path("/test/hadoop")));
    fail("did not find " + newFile + " - directory: " + ls);
  }
  assertTrue("Destination changed", fs.exists(path("/test/new/newdir/file")));
}

Class: org.apache.hadoop.fs.viewfs.TestChRootedFileSystem

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verify chrooted working-directory semantics: absolute, relative, "." and "..". */
@Test public void testWorkingDirectory() throws Exception {
  fSys.mkdirs(new Path("/testWd"));
  Path workDir = new Path("/testWd");
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());
  // "." leaves the working directory unchanged
  fSys.setWorkingDirectory(new Path("."));
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());
  // ".." moves to the parent
  fSys.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
  workDir = new Path("/testWd");
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());
  // a relative path resolves against the current working directory
  Path relativeDir = new Path("existingDir1");
  Path absoluteDir = new Path(workDir, "existingDir1");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(relativeDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
  absoluteDir = new Path("/test/existingDir2");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
  // relative open/mkdirs should resolve under the working directory
  Path absoluteFooPath = new Path(absoluteDir, "foo");
  fSys.create(absoluteFooPath).close();
  fSys.open(new Path("foo")).close();
  fSys.mkdirs(new Path("newDir"));
  Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir, "newDir")));
  // a fully-qualified URI on another filesystem also works
  final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
  absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * List the chrooted root: entries created via the chrooted view must
 * show up with paths resolved under {@code chrootedTo}.
 */
@Test public void testList() throws IOException {
  FileStatus fs = fSys.getFileStatus(new Path("/"));
  Assert.assertTrue(fs.isDirectory());
  // "/" in the chrooted view maps to the chroot target
  Assert.assertEquals(fs.getPath(), chrootedTo);
  FileStatus[] dirPaths = fSys.listStatus(new Path("/"));
  Assert.assertEquals(0, dirPaths.length);
  fileSystemTestHelper.createFile(fSys, "/foo");
  fileSystemTestHelper.createFile(fSys, "/bar");
  fSys.mkdirs(new Path("/dirX"));
  fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
  fSys.mkdirs(new Path("/dirX/dirXX"));
  // only the four direct children of "/" appear (dirXX is nested)
  dirPaths = fSys.listStatus(new Path("/"));
  Assert.assertEquals(4, dirPaths.length);
  fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
  fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
}

Class: org.apache.hadoop.fs.viewfs.TestChRootedFs

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * FileContext variant of the chroot listing test: entries created via the
 * chrooted context must be visible in the target filesystem's listing.
 */
@Test public void testList() throws IOException {
  FileStatus fs = fc.getFileStatus(new Path("/"));
  Assert.assertTrue(fs.isDirectory());
  // "/" in the chrooted context maps to the chroot target
  Assert.assertEquals(fs.getPath(), chrootedTo);
  FileStatus[] dirPaths = fc.util().listStatus(new Path("/"));
  Assert.assertEquals(0, dirPaths.length);
  fileContextTestHelper.createFileNonRecursive(fc, "/foo");
  fileContextTestHelper.createFileNonRecursive(fc, "/bar");
  fc.mkdir(new Path("/dirX"), FileContext.DEFAULT_PERM, false);
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirY"), FileContext.DEFAULT_PERM, false);
  fc.mkdir(new Path("/dirX/dirXX"), FileContext.DEFAULT_PERM, false);
  // only the four direct children of "/" appear (dirXX is nested)
  dirPaths = fc.util().listStatus(new Path("/"));
  Assert.assertEquals(4, dirPaths.length);
  fs = fileContextTestHelper.containsPath(fcTarget, "foo", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs = fileContextTestHelper.containsPath(fcTarget, "bar", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs = fileContextTestHelper.containsPath(fcTarget, "dirX", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
  fs = fileContextTestHelper.containsPath(fcTarget, "dirY", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileContext variant of the working-directory test: absolute, relative,
 * "." and "..", plus a cd into a non-existent directory (must fail).
 */
@Test public void testWorkingDirectory() throws Exception {
  fc.mkdir(new Path("/testWd"), FileContext.DEFAULT_PERM, false);
  Path workDir = new Path("/testWd");
  // FileContext returns fully-qualified working directories
  Path fqWd = fc.makeQualified(workDir);
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(fqWd, fc.getWorkingDirectory());
  // "." leaves the working directory unchanged
  fc.setWorkingDirectory(new Path("."));
  Assert.assertEquals(fqWd, fc.getWorkingDirectory());
  // ".." moves to the parent
  fc.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(fqWd.getParent(), fc.getWorkingDirectory());
  workDir = new Path("/testWd");
  fqWd = fc.makeQualified(workDir);
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(fqWd, fc.getWorkingDirectory());
  // a relative path resolves against the current working directory
  Path relativeDir = new Path("existingDir1");
  Path absoluteDir = new Path(workDir, "existingDir1");
  fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
  Path fqAbsoluteDir = fc.makeQualified(absoluteDir);
  fc.setWorkingDirectory(relativeDir);
  Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
  absoluteDir = new Path("/test/existingDir2");
  fqAbsoluteDir = fc.makeQualified(absoluteDir);
  fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
  // relative open/mkdir should resolve under the working directory
  Path absolutePath = new Path(absoluteDir, "foo");
  fc.create(absolutePath, EnumSet.of(CreateFlag.CREATE)).close();
  fc.open(new Path("foo")).close();
  fc.mkdir(new Path("newDir"), FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, new Path(absoluteDir, "newDir")));
  // cd into a directory that does not exist must be rejected
  absoluteDir = fileContextTestHelper.getTestRootPath(fc, "nonexistingPath");
  try {
    fc.setWorkingDirectory(absoluteDir);
    Assert.fail("cd to non existing dir should have failed");
  } catch (Exception e) {
    // expected
  }
  // a fully-qualified URI on another filesystem also works
  final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
  absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
  fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemDelegationTokenSupport

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The view filesystem reports its mounted targets as children;
 * leaf filesystems report none.
 */
@Test public void testGetChildFileSystems() throws Exception {
  // leaf filesystems have no children
  assertNull(fs1.getChildFileSystems());
  assertNull(fs2.getChildFileSystems());
  // the view reports exactly its two mount targets
  // NOTE(review): raw List type — presumably List<FileSystem>; confirm
  List children = Arrays.asList(viewFs.getChildFileSystems());
  assertEquals(2, children.size());
  assertTrue(children.contains(fs1));
  assertTrue(children.contains(fs2));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * addDelegationTokens on the view must collect one token per child
 * filesystem, and a second call must add nothing (tokens already present).
 */
@Test public void testAddDelegationTokens() throws Exception {
  Credentials creds = new Credentials();
  Token fs1Tokens[] = addTokensWithCreds(fs1, creds);
  assertEquals(1, fs1Tokens.length);
  assertEquals(1, creds.numberOfTokens());
  Token fs2Tokens[] = addTokensWithCreds(fs2, creds);
  assertEquals(1, fs2Tokens.length);
  assertEquals(2, creds.numberOfTokens());
  // collect via the view into a fresh Credentials: both child tokens appear
  Credentials savedCreds = creds;
  creds = new Credentials();
  Token viewFsTokens[] = viewFs.addDelegationTokens("me", creds);
  assertEquals(2, viewFsTokens.length);
  assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
  assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
  // a repeat call is a no-op: tokens are already in the credentials
  viewFsTokens = viewFs.addDelegationTokens("me", creds);
  assertEquals(0, viewFsTokens.length);
  assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
  assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsDefaultValue

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that default blocksize values can be retrieved on the client side.
 * Calling {@code getDefaultBlockSize()} without a path must throw
 * {@code NotInMountpointException}; with a path inside a mount point it
 * returns the HDFS default.
 */
@Test
public void testGetDefaultBlockSize() throws IOException, URISyntaxException {
  try {
    vfs.getDefaultBlockSize();
    // fixed message: previously named the wrong method and misspelled
    // "exception"
    fail("getDefaultBlockSize on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    // path-based overload resolves through the mount point
    assertEquals(vfs.getDefaultBlockSize(testFilePath), DFS_BLOCK_SIZE_DEFAULT);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that server default values can be retrieved on the client side.
 * Calling {@code getServerDefaults()} without a path must throw
 * {@code NotInMountpointException}; with a path it returns the configured
 * defaults.
 */
@Test
public void testServerDefaults() throws IOException {
  try {
    // the return value is irrelevant: the call must throw
    vfs.getServerDefaults();
    // fixed message typo: "excetion" -> "exception"
    fail("getServerDefaults on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    FsServerDefaults serverDefaults = vfs.getServerDefaults(testFilePath);
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,
        serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,
        serverDefaults.getWritePacketSize());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,
        serverDefaults.getFileBufferSize());
    // replication is configured one above the default in this test setup
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that default replication values can be retrieved on the client side.
 * Calling {@code getDefaultReplication()} without a path must throw
 * {@code NotInMountpointException}; with a path it returns the configured
 * replication.
 */
@Test
public void testGetDefaultReplication() throws IOException, URISyntaxException {
  try {
    vfs.getDefaultReplication();
    // fixed message typo: "excetion" -> "exception"
    fail("getDefaultReplication on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    // replication is configured one above the default in this test setup
    assertEquals(vfs.getDefaultReplication(testFilePath),
        DFS_REPLICATION_DEFAULT + 1);
  }
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsFileStatusHdfs

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A checksum obtained through the viewfs mount must match the checksum of
 * the same file read directly from HDFS, and must differ from that of a
 * different file.
 */
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
  fileSystemTestHelper.createFile(fHdfs, someFile);
  // a second, different file to prove checksums actually differ
  fileSystemTestHelper.createFile(fHdfs, fileSystemTestHelper.getTestRootPath(fHdfs, someFile + "other"), 1, 512);
  FileChecksum viewFSCheckSum = vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
  FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(new Path(someFile));
  FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(new Path(someFile + "other"));
  assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum, hdfsCheckSum);
  assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
}

Class: org.apache.hadoop.fs.viewfs.ViewFileSystemBaseTest

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Block locations seen through the view must match the locations reported
 * by the target filesystem for the same file.
 */
@Test
public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot, "data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  // one location per block when the target supports blocks, else a single one
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
  // repeat the query; previously the view-side result was discarded so the
  // second comparison re-used the stale viewBL — capture it this time
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Internal (mount-table) directories expose a synthetic ACL: owned by the
 * current user, minimal r-xr-xr-x entries, no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
  assertEquals(aclStatus.getOwner(), currentUser.getUserName());
  assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
  // internal dirs carry only the minimal ACL derived from permission 555
  assertEquals(aclStatus.getEntries(), AclUtil.getMinimalAcl(PERMISSION_555));
  assertFalse(aclStatus.isStickyBit());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Listing a mounted directory through the view must show files and
 * directories created via the view, with correct types and lengths.
 */
@Test
public void testListOnMountTargetDirs() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/data"));
  FileStatus fs;
  Assert.assertEquals(0, dirPaths.length);
  // create a file through the view and check it appears in the listing
  long len = fileSystemTestHelper.createFile(fsView, "/data/foo");
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(1, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  // fixed message typo: "shoudl" -> "should"
  Assert.assertTrue("Created file should appear as a file", fs.isFile());
  Assert.assertEquals(len, fs.getLen());
  // add a directory and check both entries are listed with correct types
  fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/data/dirX"));
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created file should appear as a file", fs.isFile());
  fs = fileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created dir should appear as a dir", fs.isDirectory());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test "readOps" (e.g. list, listStatus) on internal dirs of the mount
 * table. These operations should succeed: internal dirs list as
 * directories, mount links list as symlinks.
 */
@Test
public void testListOnInternalDirsOfMountTable() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/"));
  FileStatus fs;
  verifyRootChildren(dirPaths);
  // listing an internal dir shows its internal child dir and its mount link
  dirPaths = fsView.listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView,
      "/internalDir/internalDir2", dirPaths);
  Assert.assertNotNull(fs);
  // fixed message: this entry is an internal dir, not a mount/symlink
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileSystemTestHelper.containsPath(fsView,
      "/internalDir/linkToDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}

Class: org.apache.hadoop.fs.viewfs.ViewFsBaseTest

BooleanVerifier ExceptionVerifier HybridVerifier 
/**
 * Deleting a mount link inside an internal dir is a mount-table mutation
 * and must be rejected with AccessControlException.
 */
@Test(expected=AccessControlException.class) public void testInternalDeleteExisting2() throws IOException {
  // sanity: the link resolves to an existing directory before we try delete
  Assert.assertTrue("Delete of link to dir should succeed", fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
  fcView.delete(new Path("/internalDir/linkToDir2"), false);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileContext variant: internal (mount-table) directories expose a
 * synthetic ACL owned by the current user with minimal 555 entries.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  assertEquals(aclStatus.getOwner(), currentUser.getUserName());
  assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
  // internal dirs carry only the minimal ACL derived from permission 555
  assertEquals(aclStatus.getEntries(), AclUtil.getMinimalAcl(PERMISSION_555));
  assertFalse(aclStatus.isStickyBit());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * getFileStatus through the view: internal dirs and dir links report as
 * directories, a file link as a file, and a dangling link raises
 * FileNotFoundException.
 */
@Test
public void testFileStatusOnMountLink() throws IOException {
  Assert.assertTrue("Slash should appear as dir",
      fcView.getFileStatus(new Path("/")).isDirectory());
  checkFileStatus(fcView, "/", fileType.isDir);
  checkFileStatus(fcView, "/user", fileType.isDir);
  checkFileStatus(fcView, "/data", fileType.isDir);
  checkFileStatus(fcView, "/internalDir", fileType.isDir);
  checkFileStatus(fcView, "/internalDir/linkToDir2", fileType.isDir);
  checkFileStatus(fcView, "/internalDir/internalDir2/linkToDir3",
      fileType.isDir);
  checkFileStatus(fcView, "/linkToAFile", fileType.isFile);
  // a link whose target is missing must not resolve
  try {
    fcView.getFileStatus(new Path("/danglingLink"));
    // fixed message typo: "Excepted" -> "Expected"
    Assert.fail("Expected a not found exception here");
  } catch (FileNotFoundException e) {
    // expected
  }
}

BooleanVerifier ExceptionVerifier HybridVerifier 
/**
 * Renaming a mount link inside an internal dir is a mount-table mutation
 * and must be rejected with AccessControlException.
 */
@Test(expected = AccessControlException.class)
public void testInternalRename2() throws IOException {
  // fixed message typo: "linkTODir2" -> "linkToDir2" (matches the path)
  Assert.assertTrue("linkToDir2 should be a dir",
      fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
  fcView.rename(new Path("/internalDir/linkToDir2"),
      new Path("/internalDir/dir1"));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test "readOps" (e.g. list, listStatus) on internal dirs of the mount
 * table. These operations should succeed: mount links list as symlinks,
 * internal dirs list as directories.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
  FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
  FileStatus fs;
  // the root of this mount table has seven children
  Assert.assertEquals(7, dirPaths.length);
  fs = fileContextTestHelper.containsPath(fcView, "/user", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
  fs = fileContextTestHelper.containsPath(fcView, "/data", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
  fs = fileContextTestHelper.containsPath(fcView, "/internalDir", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileContextTestHelper.containsPath(fcView, "/danglingLink", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
  fs = fileContextTestHelper.containsPath(fcView, "/linkToAFile", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
  // listing inside an internal dir: one internal dir, one mount link
  dirPaths = fcView.util().listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileContextTestHelper.containsPath(fcView, "/internalDir/internalDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileContextTestHelper.containsPath(fcView, "/internalDir/linkToDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}

Class: org.apache.hadoop.ha.ClientBaseWithFixes

TestCleaner BranchVerifier BooleanVerifier HybridVerifier 
/**
 * Per-test teardown: stop the test server, release the port-number lock
 * file, and recursively delete the temp directory.
 */
@After public void tearDown() throws Exception {
  LOG.info("tearDown starting");
  tearDownAll();
  stopServer();
  // release the port reservation so other test JVMs can reuse it
  portNumLockFile.close();
  portNumFile.delete();
  if (tmpDir != null) {
    Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
  }
  serverFactory = null;
}

Class: org.apache.hadoop.ha.TestActiveStandbyElector

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * verify that getActiveData gives data when active exists, tells that
 * active does not exist (ActiveNotFoundException on NoNodeException) and
 * reports other ZooKeeper errors by rethrowing them.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws ActiveNotFoundException
 */
@Test public void testGetActiveData() throws ActiveNotFoundException, KeeperException, InterruptedException, IOException {
  // success case: ZK returns the lock node's data
  byte[] data = new byte[8];
  Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject())).thenReturn(data);
  // NOTE(review): assertEquals on byte[] compares references; this passes
  // only because the mock returns the very same array instance
  Assert.assertEquals(data, elector.getActiveData());
  Mockito.verify(mockZK, Mockito.times(1)).getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject());
  // no-node case: mapped to ActiveNotFoundException
  Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject())).thenThrow(new KeeperException.NoNodeException());
  try {
    elector.getActiveData();
    Assert.fail("ActiveNotFoundException expected");
  } catch (ActiveNotFoundException e) {
    Mockito.verify(mockZK, Mockito.times(2)).getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject());
  }
  // other ZK errors propagate unchanged
  try {
    Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject())).thenThrow(new KeeperException.AuthFailedException());
    elector.getActiveData();
    Assert.fail("KeeperException.AuthFailedException expected");
  } catch (KeeperException.AuthFailedException ke) {
    Mockito.verify(mockZK, Mockito.times(3)).getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), Mockito.anyObject());
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify that, when the callback fails to enter active state,
 * the elector rejoins the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActive() throws Exception {
  mockNoPriorActive();
  elector.joinElection(data);
  Assert.assertEquals(0, elector.sleptFor);
  // make the app's becomeActive() callback fail
  Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive();
  elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
  Mockito.verify(mockApp).becomeActive();
  // the elector must have re-created the lock node (second create call)
  Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
  Assert.assertEquals(2, count);
  // and it must have slept before rejoining
  Assert.assertTrue(elector.sleptFor > 0);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify that, when the callback fails to enter active state, after
 * a ZK disconnect (i.e from the StatCallback), that the elector rejoins
 * the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
  mockNoPriorActive();
  elector.joinElection(data);
  Assert.assertEquals(0, elector.sleptFor);
  // connection loss triggers a retry of the create (second create call)
  elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
  Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
  // the retried create finds the node already exists -> stat check
  elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
  verifyExistCall(1);
  // the existing node belongs to our own session, so we become active
  Stat stat = new Stat();
  stat.setEphemeralOwner(1L);
  Mockito.when(mockZK.getSessionId()).thenReturn(1L);
  // but the app's becomeActive() callback fails
  Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive();
  elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
  Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
  // the elector rejoins: third create call, after sleeping
  Mockito.verify(mockZK, Mockito.times(3)).create(ZK_LOCK_NAME, data, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
  Assert.assertEquals(2, count);
  Assert.assertTrue(elector.sleptFor > 0);
}

Class: org.apache.hadoop.ha.TestFailoverController

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failover to a target that fails its health check must abort and leave
 * both services in their original states.
 */
@Test public void testFailoverToUnhealthyServiceFailsAndFailsback() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  // the failover target reports unhealthy
  Mockito.doThrow(new HealthCheckFailedException("Failed!")).when(svc2.proxy).monitorHealth();
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failover to unhealthy service");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // states unchanged: no failover happened
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * If the active service is unreachable but a fencer is configured, the
 * failover should still proceed (the fencer guarantees safety) using the
 * graceful-fence timeout.
 */
@Test public void testFailoverFromNonExistantServiceWithFencer() throws Exception {
  DummyHAService svc1 = spy(new DummyHAService(null, svc1Addr));
  // a proxy whose every call fails with a connection error, but close() works
  HAServiceProtocol errorThrowingProxy = Mockito.mock(HAServiceProtocol.class, Mockito.withSettings().defaultAnswer(new ThrowsException(new IOException("Could not connect to host"))).extraInterfaces(Closeable.class));
  Mockito.doNothing().when((Closeable)errorThrowingProxy).close();
  Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(Mockito.any(), Mockito.anyInt());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
  } catch (FailoverFailedException ffe) {
    fail("Non-existant active prevented failover");
  }
  // the graceful-fence attempt must use the graceful-fence timeout
  Mockito.verify(svc1).getProxy(Mockito.any(), Mockito.eq(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * If the target refuses to become active, the failover fails but the
 * original active is restored (failback succeeds).
 */
@Test public void testFailoverToFaultyServiceFailsbackOK() throws Exception {
  DummyHAService svc1 = spy(new DummyHAService(HAServiceState.ACTIVE, svc1Addr));
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  // the target cannot transition to active
  Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failover to already active service");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // svc1 was first demoted, then re-promoted during failback
  verify(svc1.proxy).transitionToStandby(anyReqInfo());
  verify(svc1.proxy).transitionToActive(anyReqInfo());
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * If the target fails to become active AND fencing it fails, the failback
 * cannot proceed safely: the original service stays standby.
 */
@Test public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  // target fails to activate, and the configured fencer always fails
  Mockito.doThrow(new IOException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
  AlwaysFailFencer.fenceCalled = 0;
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over to service that won't transition to active");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // failback blocked: svc1 stays standby; the fencer targeted svc2 once
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(1, AlwaysFailFencer.fenceCalled);
  assertSame(svc2, AlwaysFailFencer.fencedSvc);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * When the original active was fenced during the failover, it must NOT be
 * made active again on failback — both end up standby.
 */
@Test public void testWeDontFailbackIfActiveWasFenced() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    // forceFence=true: svc1 is fenced as part of the failover
    doFailover(svc1, svc2, true, false);
    fail("Failed over to service that won't transition to active");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // no failback to a fenced service: both remain standby
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * If status queries are denied on both services, the failover must fail
 * and surface the access-denied cause.
 */
@Test public void testFailoverWithoutPermission() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  Mockito.doThrow(new AccessControlException("Access denied")).when(svc1.proxy).getServiceStatus();
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new AccessControlException("Access denied")).when(svc2.proxy).getServiceStatus();
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    fail("Can't failover when access is denied");
  } catch (FailoverFailedException ffe) {
    // the underlying access problem must be preserved as the cause
    assertTrue(ffe.getCause().getMessage().contains("Access denied"));
  }
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * When fencing is requested and the fencer fails, the failover must abort;
 * the old active has already been demoted, so both end up standby.
 */
@Test public void testFencingFailureDuringFailover() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
  AlwaysFailFencer.fenceCalled = 0;
  try {
    // forceFence=true: fencing is mandatory and will fail
    doFailover(svc1, svc2, true, false);
    fail("Failed over even though fencing requested and failed");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // the fencer ran exactly once, against the old active
  assertEquals(1, AlwaysFailFencer.fenceCalled);
  assertSame(svc1, AlwaysFailFencer.fencedSvc);
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failover to an unreachable standby must fail and leave the active
 * service active.
 */
@Test public void testFailoverToNonExistantServiceFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = spy(new DummyHAService(null, svc2Addr));
  // the standby's proxy cannot even be obtained
  Mockito.doThrow(new IOException("Failed to connect")).when(svc2).getProxy(Mockito.any(), Mockito.anyInt());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over to a non-existant standby");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  assertEquals(HAServiceState.ACTIVE, svc1.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * If the active refuses to go standby AND the fencer fails, the failover
 * must abort with no state change on either side.
 */
@Test public void testFailoverFromFaultyServiceFencingFailure() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  // the active will not transition to standby gracefully
  Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc1.proxy).transitionToStandby(anyReqInfo());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
  AlwaysFailFencer.fenceCalled = 0;
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over even though fencing failed");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // the fencer ran once against the faulty active; states unchanged
  assertEquals(1, AlwaysFailFencer.fenceCalled);
  assertSame(svc1, AlwaysFailFencer.fencedSvc);
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * If the target fails to become active, it must be fenced before the
 * original service is restored to active on failback.
 */
@Test public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  AlwaysSucceedFencer.fenceCalled = 0;
  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over to service that won't transition to active");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  // failback restored svc1; the faulty target svc2 was fenced once
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(svc2, AlwaysSucceedFencer.fencedSvc);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failover to a not-ready target fails unless forceActive is set; with
 * forceActive the failover proceeds.
 */
@Test public void testFailoverToUnreadyService() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  // the target reports it is not ready to become active
  Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy).getServiceStatus();
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    fail("Can't failover to a service that's not ready");
  } catch (FailoverFailedException ffe) {
    // only swallow the expected not-ready failure
    if (!ffe.getMessage().contains("injected not ready")) {
      throw ffe;
    }
  }
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
  // forceActive overrides the readiness check
  doFailover(svc1, svc2, false, true);
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
// Both services throw on transitionToActive, so the failover cannot complete and
// both must end up STANDBY (svc1 was successfully transitioned to standby, and
// the failback to it also fails because it cannot become active).
// NOTE(review): the fail() message "Failover to already active service" reads as
// copy-pasted from the active-to-active case — confirm against upstream source.
@Test public void testFailbackToFaultyServiceFails() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc1.proxy).transitionToActive(anyReqInfo()); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo()); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); try { doFailover(svc1,svc2,false,false); fail("Failover to already active service"); } catch ( FailoverFailedException ffe) { } assertEquals(HAServiceState.STANDBY,svc1.state); assertEquals(HAServiceState.STANDBY,svc2.state); }

UtilityVerifier EqualityVerifier HybridVerifier 
// Failing over a service to itself must be rejected outright, without invoking
// the fencer, regardless of whether the service is currently ACTIVE or STANDBY.
// Fix: reference AlwaysSucceedFencer.fenceCalled unqualified, consistent with the
// reset earlier in this method and with every sibling test in this class (the
// previous mixed TestNodeFencer.AlwaysSucceedFencer/AlwaysSucceedFencer forms
// named the same field two different ways).
@Test public void testSelfFailoverFails() throws Exception {
  DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr);
  DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr);
  svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName());
  AlwaysSucceedFencer.fenceCalled=0;
  try {
    doFailover(svc1,svc1,false,false);
    fail("Can't failover to yourself");
  } catch ( FailoverFailedException ffe) {
    // expected: self-failover is rejected
  }
  assertEquals(0,AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.ACTIVE,svc1.state);
  try {
    doFailover(svc2,svc2,false,false);
    fail("Can't failover to yourself");
  } catch ( FailoverFailedException ffe) {
    // expected: self-failover is rejected
  }
  assertEquals(0,AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.STANDBY,svc2.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// A failing transitionToStandby on the old active must NOT block failover: the
// old active gets fenced (exactly once) and the target still becomes ACTIVE.
// svc1's mock state stays ACTIVE because its transitionToStandby threw.
@Test public void testFailoverFromFaultyServiceSucceeds() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc1.proxy).transitionToStandby(anyReqInfo()); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); AlwaysSucceedFencer.fenceCalled=0; try { doFailover(svc1,svc2,false,false); } catch ( FailoverFailedException ffe) { fail("Faulty active prevented failover"); } assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertSame(svc1,AlwaysSucceedFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE,svc1.state); assertEquals(HAServiceState.ACTIVE,svc2.state); }

UtilityVerifier EqualityVerifier HybridVerifier 
// Failover must be rejected when the target is already ACTIVE; both services
// keep their (ACTIVE) state.
@Test public void testFailoverFromActiveToActive() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); DummyHAService svc2=new DummyHAService(HAServiceState.ACTIVE,svc2Addr); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); try { doFailover(svc1,svc2,false,false); fail("Can't failover to an already active service"); } catch ( FailoverFailedException ffe) { } assertEquals(HAServiceState.ACTIVE,svc1.state); assertEquals(HAServiceState.ACTIVE,svc2.state); }

Class: org.apache.hadoop.ha.TestNodeFencer

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// The fencer config parser must tolerate blank lines, leading whitespace, and
// '#' comments (both whole-line and trailing). Both configured fencers run once:
// the always-failing one first (with arg "foo"), then the succeeding one ("bar"),
// and the overall fence() still returns true because the second succeeds.
@Test public void testWhitespaceAndCommentsInConfig() throws BadFencingConfigurationException { NodeFencer fencer=setupFencer("\n" + " # the next one will always fail\n" + " " + AlwaysFailFencer.class.getName() + "(foo) # <- fails\n"+ AlwaysSucceedFencer.class.getName()+ "(bar) \n"); assertTrue(fencer.fence(MOCK_TARGET)); assertEquals(1,AlwaysFailFencer.fenceCalled); assertSame(MOCK_TARGET,AlwaysFailFencer.fencedSvc); assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertSame(MOCK_TARGET,AlwaysSucceedFencer.fencedSvc); assertEquals("foo",AlwaysFailFencer.callArgs.get(0)); assertEquals("bar",AlwaysSucceedFencer.callArgs.get(0)); }

BooleanVerifier EqualityVerifier HybridVerifier 
// With two configured fencers of the same class, fencing stops after the first
// one succeeds: fenceCalled is 1 and only the first argument ("foo") was used.
@Test public void testMultipleFencers() throws BadFencingConfigurationException { NodeFencer fencer=setupFencer(AlwaysSucceedFencer.class.getName() + "(foo)\n" + AlwaysSucceedFencer.class.getName()+ "(bar)\n"); assertTrue(fencer.fence(MOCK_TARGET)); assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertEquals("foo",AlwaysSucceedFencer.callArgs.get(0)); }

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// A fencer configured with no "(arg)" suffix is invoked with a null argument.
@Test public void testArglessFencer() throws BadFencingConfigurationException { NodeFencer fencer=setupFencer(AlwaysSucceedFencer.class.getName()); assertTrue(fencer.fence(MOCK_TARGET)); assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertSame(MOCK_TARGET,AlwaysSucceedFencer.fencedSvc); assertEquals(null,AlwaysSucceedFencer.callArgs.get(0)); }

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Basic case: one fencer with one argument; it runs once against the target
// and receives "foo" as its argument.
@Test public void testSingleFencer() throws BadFencingConfigurationException { NodeFencer fencer=setupFencer(AlwaysSucceedFencer.class.getName() + "(foo)"); assertTrue(fencer.fence(MOCK_TARGET)); assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertSame(MOCK_TARGET,AlwaysSucceedFencer.fencedSvc); assertEquals("foo",AlwaysSucceedFencer.callArgs.get(0)); }

Class: org.apache.hadoop.ha.TestShellCommandFencer

UtilityVerifier BooleanVerifier HybridVerifier 
// "shell()" (empty parens) must be rejected at NodeFencer construction with a
// parse error naming the offending line.
@Test public void testCheckParensNoArgs(){ try { Configuration conf=new Configuration(); new NodeFencer(conf,"shell()"); fail("Didn't throw when passing no args to shell"); } catch ( BadFencingConfigurationException confe) { assertTrue("Unexpected exception:" + StringUtils.stringifyException(confe),confe.getMessage().contains("Unable to parse line: 'shell()'")); } }

UtilityVerifier BooleanVerifier HybridVerifier 
// Bare "shell" (no argument at all) must also be rejected, with a
// "No argument passed" message rather than a parse error.
@Test public void testCheckNoArgs(){ try { Configuration conf=new Configuration(); new NodeFencer(conf,"shell"); fail("Didn't throw when passing no args to shell"); } catch ( BadFencingConfigurationException confe) { assertTrue("Unexpected exception:" + StringUtils.stringifyException(confe),confe.getMessage().contains("No argument passed")); } }

Class: org.apache.hadoop.ha.TestSshFenceByTcpPort

InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
// End-to-end SSH fence against a real host; skipped via Assume unless the test
// environment is configured (isConfigured()). Uses the keyfile from
// CONF_IDENTITIES_KEY and expects tryFence to succeed.
@Test(timeout=20000) public void testFence() throws BadFencingConfigurationException { Assume.assumeTrue(isConfigured()); Configuration conf=new Configuration(); conf.set(SshFenceByTcpPort.CONF_IDENTITIES_KEY,TEST_KEYFILE); SshFenceByTcpPort fence=new SshFenceByTcpPort(); fence.setConf(conf); assertTrue(fence.tryFence(TEST_TARGET,null)); }

Class: org.apache.hadoop.ha.TestZKFailoverController

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Graceful failover where the target is rigged to fail becoming active: the RPC
// must throw ServiceFailedException mentioning both the target and the injected
// failure, no fencing may occur, and the ZK active lock must return to node 0.
@Test(timeout=15000) public void testGracefulFailoverFailBecomingActive() throws Exception { try { cluster.start(); cluster.waitForActiveLockHolder(0); cluster.setFailToBecomeActive(1,true); try { cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover(); fail("Did not fail to graceful failover when target failed " + "to become active!"); } catch ( ServiceFailedException sfe) { GenericTestUtils.assertExceptionContains("Couldn't make " + cluster.getService(1) + " active",sfe); GenericTestUtils.assertExceptionContains("injected failure",sfe); } assertEquals(0,cluster.getService(0).fenceCount); assertEquals(0,cluster.getService(1).fenceCount); cluster.waitForActiveLockHolder(0); } finally { cluster.stop(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that the ZKFC can gracefully cede its active status. */ @Test(timeout=15000) public void testCedeActive() throws Exception { try { cluster.start(); DummyZKFC zkfc=cluster.getZkfc(0); assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests()); ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000); long st=Time.now(); proxy.cedeActive(3000); long et=Time.now(); assertTrue("RPC to cedeActive took " + (et - st) + " ms",et - st < 1000); assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests()); cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY); long et2=Time.now(); assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) + "ms before rejoining.",et2 - et > 2800); } finally { cluster.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, if ACLs are specified in the configuration, formatting the parent
 * znode sets them: an unauthenticated second client must get NoAuthException
 * when reading the parent znode's data.
 */
@Test(timeout=15000) public void testFormatSetsAcls() throws Exception {
  // Format the ZK parent znode via the failover controller CLI.
  DummyHAService svc=cluster.getService(1);
  assertEquals(0,runFC(svc,"-formatZK"));
  ZooKeeper otherClient=createClient();
  try {
    Stat stat=new Stat();
    otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,false,stat);
    fail("Was able to read data without authenticating!");
  } catch ( KeeperException.NoAuthException nae) {
    // expected: ACLs deny the unauthenticated client
  } finally {
    // Fix: close the second ZK client; it was previously leaked, leaving a
    // live session (and its event threads) behind after the test.
    otherClient.close();
  }
}

Class: org.apache.hadoop.hdfs.TestBlockReaderFactory

UtilityVerifier BooleanVerifier HybridVerifier 
/** * Test the case where we have a failure to complete a short circuit read * that occurs, and then later on, we have a success. * Any thread waiting on a cache load should receive the failure (if it * occurs); however, the failure result should not be cached. We want * to be able to retry later and succeed. */ @Test(timeout=60000) public void testShortCircuitCacheTemporaryFailure() throws Exception { BlockReaderTestUtil.enableBlockReaderFactoryTracing(); final AtomicBoolean replicaCreationShouldFail=new AtomicBoolean(true); final AtomicBoolean testFailed=new AtomicBoolean(false); DFSInputStream.tcpReadsDisabledForTesting=true; BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){ @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){ if (replicaCreationShouldFail.get()) { Uninterruptibles.sleepUninterruptibly(2,TimeUnit.SECONDS); return new ShortCircuitReplicaInfo(); } return null; } } ; TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testShortCircuitCacheTemporaryFailure",sockDir); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int NUM_THREADS=2; final int SEED=0xFADED; final CountDownLatch gotFailureLatch=new CountDownLatch(NUM_THREADS); final CountDownLatch shouldRetryLatch=new CountDownLatch(1); DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); Runnable readerRunnable=new Runnable(){ @Override public void run(){ try { List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks(); LocatedBlock lblock=locatedBlocks.get(0); BlockReader blockReader=null; try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); Assert.fail("expected 
getBlockReader to fail the first time."); } catch ( Throwable t) { Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t,t.getMessage().contains("TCP reads were disabled for testing")); } finally { if (blockReader != null) blockReader.close(); } gotFailureLatch.countDown(); shouldRetryLatch.await(); try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); } catch ( Throwable t) { LOG.error("error trying to retrieve a block reader " + "the second time.",t); throw t; } finally { if (blockReader != null) blockReader.close(); } } catch ( Throwable t) { LOG.error("getBlockReader failure",t); testFailed.set(true); } } } ; Thread threads[]=new Thread[NUM_THREADS]; for (int i=0; i < NUM_THREADS; i++) { threads[i]=new Thread(readerRunnable); threads[i].start(); } gotFailureLatch.await(); replicaCreationShouldFail.set(false); shouldRetryLatch.countDown(); for (int i=0; i < NUM_THREADS; i++) { Uninterruptibles.joinUninterruptibly(threads[i]); } cluster.shutdown(); sockDir.close(); Assert.assertFalse(testFailed.get()); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Regression test for HDFS-6227. The reader thread and the main thread
// coordinate via a single semaphore: the reader releases after each successful
// getBlockReader and reacquires before looping; the main thread interrupts the
// reader between acquire/release until a ClosedByInterruptException breaks the
// loop. A final read then verifies the closed replica was purged (a fresh one
// is created — replicasCreated must be exactly 2) and data is still correct.
// The interleaving is deliberate and order-dependent; do not restructure.
/** * When an InterruptedException is sent to a thread calling * FileChannel#read, the FileChannel is immediately closed and the * thread gets an exception. This effectively means that we might have * someone asynchronously calling close() on the file descriptors we use * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in * ShortCircuitCache#unref, we should check if the FileChannel objects * are still open. If not, we should purge the replica to avoid giving * it out to any future readers. * This is a regression test for HDFS-6227: Short circuit read failed * due to ClosedChannelException. * Note that you may still get ClosedChannelException errors if two threads * are reading from the same replica and an InterruptedException is delivered * to one of them. */ @Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception { BlockReaderTestUtil.enableBlockReaderFactoryTracing(); final AtomicInteger replicasCreated=new AtomicInteger(0); final AtomicBoolean testFailed=new AtomicBoolean(false); DFSInputStream.tcpReadsDisabledForTesting=true; BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){ @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){ replicasCreated.incrementAndGet(); return null; } } ; TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4095; final int SEED=0xFADE0; final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf); DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); final Semaphore sem=new Semaphore(0); final List 
locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks(); final LocatedBlock lblock=locatedBlocks.get(0); final byte[] buf=new byte[TEST_FILE_LEN]; Runnable readerRunnable=new Runnable(){ @Override public void run(){ try { while (true) { BlockReader blockReader=null; try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); sem.release(); try { blockReader.readAll(buf,0,TEST_FILE_LEN); } finally { sem.acquireUninterruptibly(); } } catch ( ClosedByInterruptException e) { LOG.info("got the expected ClosedByInterruptException",e); sem.release(); break; } finally { if (blockReader != null) blockReader.close(); } LOG.info("read another " + TEST_FILE_LEN + " bytes."); } } catch ( Throwable t) { LOG.error("getBlockReader failure",t); testFailed.set(true); sem.release(); } } } ; Thread thread=new Thread(readerRunnable); thread.start(); while (thread.isAlive()) { sem.acquireUninterruptibly(); thread.interrupt(); sem.release(); } Assert.assertFalse(testFailed.get()); BlockReader blockReader=null; try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); blockReader.readFully(buf,0,TEST_FILE_LEN); } finally { if (blockReader != null) blockReader.close(); } byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(buf,expected)); Assert.assertEquals(2,replicasCreated.get()); dfs.close(); cluster.shutdown(); sockDir.close(); }

TestInitializer AssumptionSetter HybridVerifier 
// Per-test setup: disable domain-socket bind-path validation and skip the whole
// test if native DomainSocket support failed to load on this platform.
@Before public void init(){ DomainSocket.disableBindPathValidation(); Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Server is configured with the shared-memory watcher interval set to 0 (shm
// disabled); a shm-capable client must fall back cleanly. Verified by reading
// the file correctly and inspecting the client's shm manager state: the single
// datanode entry is marked disabled with no full/notFull segments.
/** * Test that a client which supports short-circuit reads using * shared memory can fall back to not using shared memory when * the server doesn't support it. */ @Test public void testShortCircuitReadFromServerWithoutShm() throws Exception { TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration clientConf=createShortCircuitConf("testShortCircuitReadFromServerWithoutShm",sockDir); Configuration serverConf=new Configuration(clientConf); serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0); DFSInputStream.tcpReadsDisabledForTesting=true; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build(); cluster.waitActive(); clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromServerWithoutShm_clientContext"); final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int SEED=0xFADEC; DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE)); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache(); final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertEquals(1,info.size()); PerDatanodeVisitorInfo vinfo=info.get(datanode); Assert.assertTrue(vinfo.disabled); Assert.assertEquals(0,vinfo.full.size()); Assert.assertEquals(0,vinfo.notFull.size()); } } ); cluster.shutdown(); }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Ten reader threads block behind one latched createShortCircuitReplicaInfo
// call; compareAndSet(true,false) inside the callback asserts the creator runs
// exactly once, so all threads must share the single replica. The Thread.sleep
// before countDown gives readers time to queue; thread-level failures are
// funneled through the testFailed flag and checked at the end.
/** * Test the case where we have multiple threads waiting on the * ShortCircuitCache delivering a certain ShortCircuitReplica. * In this case, there should only be one call to * createShortCircuitReplicaInfo. This one replica should be shared * by all threads. */ @Test(timeout=60000) public void testMultipleWaitersOnShortCircuitCache() throws Exception { final CountDownLatch latch=new CountDownLatch(1); final AtomicBoolean creationIsBlocked=new AtomicBoolean(true); final AtomicBoolean testFailed=new AtomicBoolean(false); DFSInputStream.tcpReadsDisabledForTesting=true; BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){ @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){ Uninterruptibles.awaitUninterruptibly(latch); if (!creationIsBlocked.compareAndSet(true,false)) { Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo. Only one was expected."); } return null; } } ; TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testMultipleWaitersOnShortCircuitCache",sockDir); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int SEED=0xFADED; final int NUM_THREADS=10; DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); Runnable readerRunnable=new Runnable(){ @Override public void run(){ try { byte contents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE)); Assert.assertFalse(creationIsBlocked.get()); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); } catch ( Throwable e) { LOG.error("readerRunnable error",e); testFailed.set(true); } } } ; Thread threads[]=new Thread[NUM_THREADS]; for (int i=0; i < NUM_THREADS; i++) { 
threads[i]=new Thread(readerRunnable); threads[i].start(); } Thread.sleep(500); latch.countDown(); for (int i=0; i < NUM_THREADS; i++) { Uninterruptibles.joinUninterruptibly(threads[i]); } cluster.shutdown(); sockDir.close(); Assert.assertFalse(testFailed.get()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Mirror of the server-without-shm case: here the CLIENT has the shm watcher
// interval set to 0, so its cache must have no DfsClientShmManager at all
// (asserted null), while reads against a shm-capable server still succeed.
/** * Test that a client which does not support short-circuit reads using * shared memory can talk with a server which supports it. */ @Test public void testShortCircuitReadFromClientWithoutShm() throws Exception { TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration clientConf=createShortCircuitConf("testShortCircuitReadWithoutShm",sockDir); Configuration serverConf=new Configuration(clientConf); DFSInputStream.tcpReadsDisabledForTesting=true; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build(); cluster.waitActive(); clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0); clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromClientWithoutShm_clientContext"); final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int SEED=0xFADEC; DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE)); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache(); Assert.assertEquals(null,cache.getDfsClientShmManager()); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestBlockReaderLocalLegacy

AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Write a file and read it back with both the legacy and the new short-circuit
 * read paths configured, verifying the bytes round-trip intact. Skipped when
 * native DomainSocket support is unavailable. The socket directory is closed
 * right after cluster startup — the paths are already bound by then.
 */
@Test public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR=1;
  final int FILE_LENGTH=512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir=new TemporarySocketDirectory();
  HdfsConfiguration conf=getConfiguration(socketDir);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs=cluster.getFileSystem();
  Path path=new Path("/foo");
  // Deterministic payload: bytes cycle 0..9.
  byte orig[]=new byte[FILE_LENGTH];
  for (int i=0; i < orig.length; i++) {
    orig[i]=(byte)(i % 10);
  }
  FSDataOutputStream fos=fs.create(path,(short)1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
  FSDataInputStream fis=cluster.getFileSystem().open(path);
  byte buf[]=new byte[FILE_LENGTH];
  IOUtils.readFully(fis,buf,0,FILE_LENGTH);
  fis.close();
  // Fix: removed a stray no-op "Arrays.equals(orig,buf);" statement that
  // followed this assertion — its boolean result was silently discarded.
  Assert.assertArrayEquals(orig,buf);
  cluster.shutdown();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// After corrupting all replicas, a direct-ByteBuffer read must throw
// ChecksumException while leaving the buffer's position and limit exactly as
// they were before the call — checked both for a fresh buffer (0/capacity) and
// for an explicitly positioned one (3/25).
/** * Test that, in the case of an error, the position and limit of a ByteBuffer * are left unchanged. This is not mandated by ByteBufferReadable, but clients * of this class might immediately issue a retry on failure, so it's polite. */ @Test public void testStablePositionAfterCorruptRead() throws Exception { final short REPL_FACTOR=1; final long FILE_LENGTH=512L; HdfsConfiguration conf=getConfiguration(null); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path path=new Path("/corrupted"); DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L); DFSTestUtil.waitReplication(fs,path,REPL_FACTOR); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path); int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block); assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted); FSDataInputStream dis=cluster.getFileSystem().open(path); ByteBuffer buf=ByteBuffer.allocateDirect((int)FILE_LENGTH); boolean sawException=false; try { dis.read(buf); } catch ( ChecksumException ex) { sawException=true; } assertTrue(sawException); assertEquals(0,buf.position()); assertEquals(buf.capacity(),buf.limit()); dis=cluster.getFileSystem().open(path); buf.position(3); buf.limit(25); sawException=false; try { dis.read(buf); } catch ( ChecksumException ex) { sawException=true; } assertTrue(sawException); assertEquals(3,buf.position()); assertEquals(25,buf.limit()); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestClientProtocolForPipelineRecovery

UtilityVerifier BooleanVerifier HybridVerifier 
// Exercises every rejection path of NameNode#updateBlockForPipeline before the
// one success case: finalized block, non-existent block, wrong lease holder,
// null lease holder — then the real client name while an append pipeline is
// open. The nested try/finally ordering (append stream, read stream, cluster)
// is deliberate; do not restructure.
@Test public void testGetNewStamp() throws IOException { int numDataNodes=1; Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); try { cluster.waitActive(); FileSystem fileSys=cluster.getFileSystem(); NamenodeProtocols namenode=cluster.getNameNodeRpc(); Path file=new Path("dataprotocol.dat"); DFSTestUtil.createFile(fileSys,file,1L,(short)numDataNodes,0L); ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fileSys,file); try { namenode.updateBlockForPipeline(firstBlock,""); Assert.fail("Can not get a new GS from a finalized block"); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is not under Construction")); } try { long newBlockId=firstBlock.getBlockId() + 1; ExtendedBlock newBlock=new ExtendedBlock(firstBlock.getBlockPoolId(),newBlockId,0,firstBlock.getGenerationStamp()); namenode.updateBlockForPipeline(newBlock,""); Assert.fail("Cannot get a new GS from a non-existent block"); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("does not exist")); } DFSOutputStream out=null; try { out=(DFSOutputStream)(fileSys.append(file).getWrappedStream()); out.write(1); out.hflush(); FSDataInputStream in=null; try { in=fileSys.open(file); firstBlock=DFSTestUtil.getAllBlocks(in).get(0).getBlock(); } finally { IOUtils.closeStream(in); } DFSClient dfs=((DistributedFileSystem)fileSys).dfs; try { namenode.updateBlockForPipeline(firstBlock,"test" + dfs.clientName); Assert.fail("Cannot get a new GS for a non lease holder"); } catch ( LeaseExpiredException e) { Assert.assertTrue(e.getMessage().startsWith("Lease mismatch")); } try { namenode.updateBlockForPipeline(firstBlock,null); Assert.fail("Cannot get a new GS for a null lease holder"); } catch ( LeaseExpiredException e) { Assert.assertTrue(e.getMessage().startsWith("Lease mismatch")); } namenode.updateBlockForPipeline(firstBlock,dfs.clientName); } finally { IOUtils.closeStream(out); } } finally { 
cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestDFSAddressConfig

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Cycles the datanode through three address configurations — the defaults,
// unset keys, and explicit wildcard "0.0.0.0:0" — restarting the DN each time
// and checking the transfer address it binds: loopback for the first two,
// wildcard for the last.
@Test public void testDFSAddressConfig() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); ArrayList dns=cluster.getDataNodes(); DataNode dn=dns.get(0); String selfSocketAddr=dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); assertTrue(selfSocketAddr.contains("/127.0.0.1:")); for (int i=0; i < dns.size(); i++) { DataNodeProperties dnp=cluster.stopDataNode(i); assertNotNull("Should have been able to stop simulated datanode",dnp); } conf.unset(DFS_DATANODE_ADDRESS_KEY); conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY); conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY); cluster.startDataNodes(conf,1,true,StartupOption.REGULAR,null,null,null,false,true); dns=cluster.getDataNodes(); dn=dns.get(0); selfSocketAddr=dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); assertTrue(selfSocketAddr.contains("/127.0.0.1:")); for (int i=0; i < dns.size(); i++) { DataNodeProperties dnp=cluster.stopDataNode(i); assertNotNull("Should have been able to stop simulated datanode",dnp); } conf.set(DFS_DATANODE_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,"0.0.0.0:0"); cluster.startDataNodes(conf,1,true,StartupOption.REGULAR,null,null,null,false,true); dns=cluster.getDataNodes(); dn=dns.get(0); selfSocketAddr=dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); assertTrue(selfSocketAddr.contains("/0.0.0.0:")); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestDFSClientExcludedNodes

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Datanodes excluded by the client during a write must be forgiven after the
// configured expiry (2.5s here). Two DNs are stopped mid-write (getting them
// excluded), restarted, and after sleeping past the expiry the write must
// complete even though the one never-excluded DN (index 0) is then stopped.
@Test(timeout=60000) public void testExcludedNodesForgiveness() throws IOException { conf.setLong(DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,2500); conf.setInt("io.bytes.per.checksum",512); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); List props=cluster.dataNodes; FileSystem fs=cluster.getFileSystem(); Path filePath=new Path("/testForgivingExcludedNodes"); byte[] bytes=new byte[256]; for (int index=0; index < bytes.length; index++) { bytes[index]='0'; } FSDataOutputStream out=fs.create(filePath,true,4096,(short)3,512); out.write(bytes); out.write(bytes); out.hflush(); DataNodeProperties two=cluster.stopDataNode(2); DataNodeProperties one=cluster.stopDataNode(1); out.write(bytes); out.write(bytes); out.hflush(); Assert.assertEquals(true,cluster.restartDataNode(one,true)); Assert.assertEquals(true,cluster.restartDataNode(two,true)); cluster.waitActive(); ThreadUtil.sleepAtLeastIgnoreInterrupts(5000); cluster.stopDataNode(0); try { out.write(bytes); out.hflush(); out.close(); } catch ( Exception e) { fail("Excluded DataNodes should be forgiven after a while and " + "not cause file writing exception of: '" + e.getMessage() + "'"); } }

Class: org.apache.hadoop.hdfs.TestDFSClientFailover

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Creates a file via the failover-aware FS, kills NN0, promotes NN1, and checks
// the same client handle still sees the file. Also verifies the logical URI
// works with an explicit default port appended.
/** * Make sure that client failover works when an active NN dies and the standby * takes over. */ @Test public void testDfsClientFailover() throws IOException, URISyntaxException { FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); DFSTestUtil.createFile(fs,TEST_FILE,FILE_LENGTH_TO_VERIFY,(short)1,1L); assertEquals(fs.getFileStatus(TEST_FILE).getLen(),FILE_LENGTH_TO_VERIFY); cluster.shutdownNameNode(0); cluster.transitionToActive(1); assertEquals(fs.getFileStatus(TEST_FILE).getLen(),FILE_LENGTH_TO_VERIFY); Path withPort=new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster) + ":"+ NameNode.DEFAULT_PORT+ "/"+ TEST_FILE.toUri().getPath()); FileSystem fs2=withPort.getFileSystem(fs.getConf()); assertTrue(fs2.exists(withPort)); fs.close(); }

UtilityVerifier BooleanVerifier HybridVerifier 
// A logical HA URI with a proxy provider configured but no namenode addresses
// must fail fast with an IOException whose text names the unconfigured URI.
/** * Make sure that a helpful error message is shown if a proxy provider is * configured for a given URI, but no actual addresses are configured for that * URI. */ @Test public void testFailureWithMisconfiguredHaNNs() throws Exception { String logicalHost="misconfigured-ha-uri"; Configuration conf=new Configuration(); conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,ConfiguredFailoverProxyProvider.class.getName()); URI uri=new URI("hdfs://" + logicalHost + "/test"); try { FileSystem.get(uri,conf).exists(new Path("/test")); fail("Successfully got proxy provider for misconfigured FS"); } catch ( IOException ioe) { LOG.info("got expected exception",ioe); assertTrue("expected exception did not contain helpful message",StringUtils.stringifyException(ioe).contains("Could not find any configured addresses for URI " + uri)); } }

Class: org.apache.hadoop.hdfs.TestDFSClientRetries

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// HDFS-3067 regression: corruption info is not persisted across read() calls on
// the same DFSInputStream, so with every replica corrupted, two consecutive
// reads must BOTH fail with a checksum error (the loop asserts identical
// behavior on each iteration).
/** * Test that checksum failures are recovered from by the next read on the same * DFSInputStream. Corruption information is not persisted from read call to * read call, so the client should expect consecutive calls to behave the same * way. See HDFS-3067. */ @Test public void testRetryOnChecksumFailure() throws Exception { HdfsConfiguration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { final short REPL_FACTOR=1; final long FILE_LENGTH=512L; cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path path=new Path("/corrupted"); DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L); DFSTestUtil.waitReplication(fs,path,REPL_FACTOR); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path); int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block); assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted); InetSocketAddress nnAddr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(nnAddr,conf); DFSInputStream dis=client.open(path.toString()); byte[] arr=new byte[(int)FILE_LENGTH]; for (int i=0; i < 2; ++i) { try { dis.read(arr,0,(int)FILE_LENGTH); fail("Expected ChecksumException not thrown"); } catch ( Exception ex) { GenericTestUtils.assertExceptionContains("Checksum error",ex); } } } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checksum stability check: the file checksum reported for a 1MB, 3-way
 * replicated file must be identical before and after the DataNode hosting
 * the first replica of the first block is stopped — the checksum is a
 * property of the data, not of which replica served the computation.
 */
@Test
public void testGetFileChecksum() throws Exception {
  final String f = "/testGetFileChecksum";
  final Path p = new Path(f);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    // 1MB file, replication factor 3, fixed seed for reproducibility.
    DFSTestUtil.createFile(fs, p, 1L << 20, (short) 3, 20100402L);

    final FileChecksum cs1 = fs.getFileChecksum(p);
    // Was assertTrue(cs1 != null): assertNotNull gives a clearer failure
    // message and intent.
    assertNotNull(cs1);

    // Stop the DataNode that holds the first replica of the first block...
    final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
        cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE).getLocatedBlocks();
    final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
    cluster.stopDataNode(first.getXferAddr());

    // ...and verify the checksum computed from the remaining replicas
    // matches the original.
    final FileChecksum cs2 = fs.getFileChecksum(p);
    assertEquals(cs1, cs2);
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Regression for HDFS-127 (see inline javadoc below).  A Mockito spy on the
// NameNode RPC makes getBlockLocations fail a configurable number of times
// (FailNTimesAnswer) before delegating to the real NameNode:
//  * maxBlockAcquires + 1 failures -> whole-file copy must throw IOException
//  * exactly maxBlockAcquires      -> copy must succeed (retries exhausted
//                                     exactly at the limit)
//  * after a successful partial read, re-arming the same failure count and
//    re-reading through the SAME DFSInputStream (openInfo + seek(0)) must
//    also succeed — proving the failure counter resets per read operation
//    rather than accumulating over the stream's lifetime.
// A short retry window and socket timeout are configured up front so the
// retry loops complete quickly.
/** * This tests that DFSInputStream failures are counted for a given read * operation, and not over the lifetime of the stream. It is a regression * test for HDFS-127. */ @Test public void testFailuresArePerOperation() throws Exception { long fileSize=4096; Path file=new Path("/testFile"); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10); conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,2 * 1000); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); NamenodeProtocols preSpyNN=cluster.getNameNodeRpc(); NamenodeProtocols spyNN=spy(preSpyNN); DFSClient client=new DFSClient(null,spyNN,conf,null); int maxBlockAcquires=client.getMaxBlockAcquireFailures(); assertTrue(maxBlockAcquires > 0); DFSTestUtil.createFile(fs,file,fileSize,(short)1,12345L); doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong()); try { IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true); fail("Didn't get exception"); } catch ( IOException ioe) { DFSClient.LOG.info("Got expected exception",ioe); } doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong()); IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true); DFSClient.LOG.info("Starting test case for failure reset"); doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong()); DFSInputStream is=client.open(file.toString()); byte buf[]=new byte[10]; IOUtils.readFully(is,buf,0,buf.length); DFSClient.LOG.info("First read successful after some failures."); doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong()); is.openInfo(); is.seek(0); IOUtils.readFully(is,buf,0,buf.length); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestDFSRemove

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Space-accounting round trip: create 100 files, record total DFS space
// used at the start / at peak / after deleting them all, and assert the
// final usage returns exactly to the starting value.  The sleep of three
// heartbeat intervals gives DataNodes time to receive and process the
// block-deletion commands before the final measurement.
// NOTE(review): the assertion message embeds all three measurements, so a
// failure shows whether deletion simply lagged or blocks truly leaked.
@Test public void testRemove() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { FileSystem fs=cluster.getFileSystem(); assertTrue(fs.mkdirs(dir)); long dfsUsedStart=getTotalDfsUsed(cluster); { final int fileCount=100; for (int i=0; i < fileCount; i++) { Path a=new Path(dir,"a" + i); createFile(fs,a); } long dfsUsedMax=getTotalDfsUsed(cluster); for (int i=0; i < fileCount; i++) { Path a=new Path(dir,"a" + i); fs.delete(a,false); } Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000); long dfsUsedFinal=getTotalDfsUsed(cluster); assertEquals("All blocks should be gone. start=" + dfsUsedStart + " max="+ dfsUsedMax+ " final="+ dfsUsedFinal,dfsUsedStart,dfsUsedFinal); } fs.delete(dir,true); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestDFSRename

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// FileSystem.rename corner cases on a 2-DataNode cluster:
//  * renaming a closed file ("a" -> "b") while another file ("aa") is held
//    open keeps the open writer's lease valid across the rename — lease
//    count goes 0 -> 1 while "aa" is open, back to 0 after close
//  * rename into a non-existent parent ("/c/d") returns false
//  * renaming a directory into its own subtree ("/a/b" -> "/a/b/c") fails,
//    as does renaming its parent into the destination's parent
//  * a destination that is a name-prefix sibling ("/testPrefix" ->
//    "/testPrefixfile") succeeds
//  * self/trailing-slash forms: "src -> src" and "src -> src/" succeed,
//    but "/a/b" -> "/a/b/" returns false per the asserts
//    (NOTE(review): confirm the trailing-slash semantics against the
//    FileSystem#rename contract).
@Test public void testRename() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { FileSystem fs=cluster.getFileSystem(); assertTrue(fs.mkdirs(dir)); { Path a=new Path(dir,"a"); Path aa=new Path(dir,"aa"); Path b=new Path(dir,"b"); createFile(fs,a); assertEquals(0,countLease(cluster)); DataOutputStream aa_out=fs.create(aa); aa_out.writeBytes("something"); assertEquals(1,countLease(cluster)); list(fs,"rename0"); fs.rename(a,b); list(fs,"rename1"); aa_out.writeBytes(" more"); aa_out.close(); list(fs,"rename2"); assertEquals(0,countLease(cluster)); } { Path dstPath=new Path("/c/d"); assertFalse(fs.exists(dstPath)); assertFalse(fs.rename(dir,dstPath)); } { Path src=new Path("/a/b"); Path dst=new Path("/a/b/c"); createFile(fs,new Path(src,"foo")); assertFalse(fs.rename(src,dst)); assertFalse(fs.rename(src.getParent(),dst.getParent())); } { Path src=new Path("/testPrefix"); Path dst=new Path("/testPrefixfile"); createFile(fs,src); assertTrue(fs.rename(src,dst)); } { Path src=new Path("/a/b/c"); createFile(fs,src); assertTrue(fs.rename(src,src)); assertFalse(fs.rename(new Path("/a/b"),new Path("/a/b/"))); assertTrue(fs.rename(src,new Path("/a/b/c/"))); } fs.delete(dir,true); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestDFSShell

APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier 
// "-appendToFile" happy path: two 1MB local files of random data are
// appended to /remoteFile, and the same command is run twice.  After the
// first run the remote length must be 2x the input length (file created
// and both sources appended); after the second run, 4x.
@Test(timeout=300000) public void testAppendToFile() throws Exception { final int inputFileLength=1024 * 1024; File testRoot=new File(TEST_ROOT_DIR,"testAppendtoFileDir"); testRoot.mkdirs(); File file1=new File(testRoot,"file1"); File file2=new File(testRoot,"file2"); createLocalFileWithRandomData(inputFileLength,file1); createLocalFileWithRandomData(inputFileLength,file2); Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); try { FileSystem dfs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + dfs.getUri(),dfs instanceof DistributedFileSystem); Path remoteFile=new Path("/remoteFile"); FsShell shell=new FsShell(); shell.setConf(conf); String[] argv=new String[]{"-appendToFile",file1.toString(),file2.toString(),remoteFile.toString()}; int res=ToolRunner.run(shell,argv); assertThat(res,is(0)); assertThat(dfs.getFileStatus(remoteFile).getLen(),is((long)inputFileLength * 2)); res=ToolRunner.run(shell,argv); assertThat(res,is(0)); assertThat(dfs.getFileStatus(remoteFile).getLen(),is((long)inputFileLength * 4)); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "-get" behavior with and without -ignoreCrc.  The anonymous TestGetRunner
// builds "-get [options] <remote> <dst-N>" argument arrays with a fresh
// local destination per invocation, asserts the expected shell exit code,
// and returns the fetched file's contents (or null on expected failure).
// Flow:
//  1. copy a local file to HDFS; plain -get and -get -ignoreCrc both round-
//     trip the original contents
//  2. corrupt the on-disk block files (corrupt(files)) and restart the
//     cluster without reformatting
//  3. plain -get must now fail (exit 1 -> runner returns null), while
//     -get -ignoreCrc succeeds and returns the corrupted bytes: every byte
//     after the first equals the original, and the first byte was bumped by
//     one (per the charAt(0)+1 assertion — that is how corrupt() mangles
//     the block).
@Test(timeout=30000) public void testGet() throws IOException { DFSTestUtil.setLogLevel2All(FSInputChecker.LOG); final String fname="testGet.txt"; Path root=new Path("/test/get"); final Path remotef=new Path(root,fname); final Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10); TestGetRunner runner=new TestGetRunner(){ private int count=0; private final FsShell shell=new FsShell(conf); public String run( int exitcode, String... options) throws IOException { String dst=new File(TEST_ROOT_DIR,fname + ++count).getAbsolutePath(); String[] args=new String[options.length + 3]; args[0]="-get"; args[args.length - 2]=remotef.toString(); args[args.length - 1]=dst; for (int i=0; i < options.length; i++) { args[i + 1]=options[i]; } show("args=" + Arrays.asList(args)); try { assertEquals(exitcode,shell.run(args)); } catch ( Exception e) { assertTrue(StringUtils.stringifyException(e),false); } return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null; } } ; File localf=createLocalFile(new File(TEST_ROOT_DIR,fname)); MiniDFSCluster cluster=null; DistributedFileSystem dfs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build(); dfs=cluster.getFileSystem(); mkdir(dfs,root); dfs.copyFromLocalFile(false,false,new Path(localf.getPath()),remotef); String localfcontent=DFSTestUtil.readFile(localf); assertEquals(localfcontent,runner.run(0)); assertEquals(localfcontent,runner.run(0,"-ignoreCrc")); List files=getBlockFiles(cluster); dfs.close(); cluster.shutdown(); show("files=" + files); corrupt(files); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build(); dfs=cluster.getFileSystem(); assertEquals(null,runner.run(1)); String corruptedcontent=runner.run(0,"-ignoreCrc"); assertEquals(localfcontent.substring(1),corruptedcontent.substring(1)); assertEquals(localfcontent.charAt(0) + 1,corruptedcontent.charAt(0)); } finally { if (null != dfs) { try { dfs.close(); } catch ( 
Exception e) { } } if (null != cluster) { cluster.shutdown(); } localf.delete(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// The step-by-step scenario is spelled out in the inline javadoc below.
// Each step runs FsShell (-mkdir/-chmod/-setfattr/-getfattr) as user1,
// user2 or the superuser via UserGroupInformation.doAs, asserting the exit
// code and, for the denied cases, that "Permission denied" was printed to
// the captured System.err (redirected to a buffer up front and restored in
// the finally block).
// NOTE(review): one of the user1.doAs(...) actions has an empty run() body
// and is a no-op — likely leftover from an earlier edit; candidate for
// removal.
/** * Test to make sure that user namespace xattrs can be set only if path has * access and for sticky directorries, only owner/privileged user can write. * Trusted namespace xattrs can be set only with privileged users. * As user1: Create a directory (/foo) as user1, chown it to user1 (and * user1's group), grant rwx to "other". * As user2: Set an xattr (should pass with path access). * As user1: Set an xattr (should pass). * As user2: Read the xattr (should pass). Remove the xattr (should pass with * path access). * As user1: Read the xattr (should pass). Remove the xattr (should pass). * As user1: Change permissions only to owner * As User2: Set an Xattr (Should fail set with no path access) Remove an * Xattr (Should fail with no path access) * As SuperUser: Set an Xattr with Trusted (Should pass) */ @Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception { final String USER1="user1"; final String GROUP1="supergroup"; final UserGroupInformation user1=UserGroupInformation.createUserForTesting(USER1,new String[]{GROUP1}); final UserGroupInformation user2=UserGroupInformation.createUserForTesting("user2",new String[]{"mygroup2"}); final UserGroupInformation SUPERUSER=UserGroupInformation.getCurrentUser(); MiniDFSCluster cluster=null; PrintStream bak=null; try { final Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final FileSystem fs=cluster.getFileSystem(); fs.setOwner(new Path("/"),USER1,GROUP1); bak=System.err; final FsShell fshell=new FsShell(conf); final ByteArrayOutputStream out=new ByteArrayOutputStream(); System.setErr(new PrintStream(out)); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-mkdir","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object 
run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-chmod","707","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-chmod","700","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a2","/foo"}); assertEquals("Returned should be 1",1,ret); final String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a2","/foo"}); 
assertEquals("Returned should be 1",1,ret); final String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); SUPERUSER.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","trusted.a3","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); } finally { if (bak != null) { System.setErr(bak); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier 
/**
 * "-appendToFile" must reject malformed argument lists: a lone local source
 * with no destination, and "-" (stdin) mixed with a regular local source.
 * Both invocations must return a non-zero exit code.
 */
@Test(timeout=300000)
public void testAppendToFileBadArgs() throws Exception {
  final int inputFileLength = 1024 * 1024;
  final File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
  testRoot.mkdirs();
  final File file1 = new File(testRoot, "file1");
  createLocalFileWithRandomData(inputFileLength, file1);

  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    final FileSystem dfs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + dfs.getUri(),
        dfs instanceof DistributedFileSystem);

    final FsShell shell = new FsShell();
    shell.setConf(conf);

    // Missing destination argument: the command must fail.
    String[] argv = new String[]{"-appendToFile", file1.toString()};
    int res = ToolRunner.run(shell, argv);
    assertThat(res, not(0));

    // "-" (stdin) combined with another local source: must also fail.
    final Path remoteFile = new Path("/remoteFile");
    argv = new String[]{
        "-appendToFile", file1.toString(), "-", remoteFile.toString()};
    res = ToolRunner.run(shell, argv);
    assertThat(res, not(0));
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// FsShell commands addressed by fully-qualified URIs across two separate
// MiniDFSClusters (the shell is configured with the src cluster's conf, so
// every dst-cluster operation must resolve through the explicit URI):
//  * -ls / -rmr / -du against the dst cluster's URI
//  * -put of a local file: URI into the dst cluster, then -cp of that file
//    from the dst cluster URI to the src cluster URI (existence verified)
//  * -cat of the dst-cluster copy
//  * recursive -chgrp / -chown with URI-qualified paths and a "/*" glob,
//    verified via confirmOwner
//  * finally, scheme-only "hdfs:///" paths for -cat / -ls / -rmr, which
//    must resolve against the default filesystem.
// Every step asserts exit code 0.  Both clusters are shut down in the
// finally block; the dst cluster uses a dedicated base dir so the two
// clusters do not collide on disk.
@Test(timeout=30000) public void testURIPaths() throws Exception { Configuration srcConf=new HdfsConfiguration(); Configuration dstConf=new HdfsConfiguration(); MiniDFSCluster srcCluster=null; MiniDFSCluster dstCluster=null; File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri"); bak.mkdirs(); try { srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build(); dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath()); dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build(); FileSystem srcFs=srcCluster.getFileSystem(); FileSystem dstFs=dstCluster.getFileSystem(); FsShell shell=new FsShell(); shell.setConf(srcConf); String[] argv=new String[2]; argv[0]="-ls"; argv[1]=dstFs.getUri().toString() + "/"; int ret=ToolRunner.run(shell,argv); assertEquals("ls works on remote uri ",0,ret); dstFs.mkdirs(new Path("/hadoopdir")); argv=new String[2]; argv[0]="-rmr"; argv[1]=dstFs.getUri().toString() + "/hadoopdir"; ret=ToolRunner.run(shell,argv); assertEquals("-rmr works on remote uri " + argv[1],0,ret); argv[0]="-du"; argv[1]=dstFs.getUri().toString() + "/"; ret=ToolRunner.run(shell,argv); assertEquals("du works on remote uri ",0,ret); File furi=new File(TEST_ROOT_DIR,"furi"); createLocalFile(furi); argv=new String[3]; argv[0]="-put"; argv[1]=furi.toURI().toString(); argv[2]=dstFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" put is working ",0,ret); argv[0]="-cp"; argv[1]=dstFs.getUri().toString() + "/furi"; argv[2]=srcFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" cp is working ",0,ret); assertTrue(srcFs.exists(new Path("/furi"))); argv=new String[2]; argv[0]="-cat"; argv[1]=dstFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" cat is working ",0,ret); dstFs.delete(new Path("/furi"),true); dstFs.delete(new Path("/hadoopdir"),true); String file="/tmp/chownTest"; Path path=new Path(file); Path parent=new Path("/tmp"); Path root=new 
Path("/"); TestDFSShell.writeFile(dstFs,path); runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*"); confirmOwner(null,"herbivores",dstFs,parent,path); runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/"); confirmOwner(null,"reptiles",dstFs,root,parent,path); argv[0]="-cat"; argv[1]="hdfs:///furi"; ret=ToolRunner.run(shell,argv); assertEquals(" default works for cat",0,ret); argv[0]="-ls"; argv[1]="hdfs:///"; ret=ToolRunner.run(shell,argv); assertEquals("default works for ls ",0,ret); argv[0]="-rmr"; argv[1]="hdfs:///furi"; ret=ToolRunner.run(shell,argv); assertEquals("default works for rm/rmr",0,ret); } finally { if (null != srcCluster) { srcCluster.shutdown(); } if (null != dstCluster) { dstCluster.shutdown(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Broad FsShell option matrix on a single 2-DataNode cluster.  Each block
// in braces is one scenario; the expected exit code is pinned by the
// assert that follows it:
//  * -rm with a glob ("myFile*") removes both files; missing-file -rm and
//    -cat return non-zero
//  * -cat with multiple existing arguments succeeds
//  * -test -e/-z/-d/-f/-s against missing paths, an empty file created by
//    -touchz, a directory, and a non-empty file (0 = predicate true,
//    1 = false/missing)
//  * -touchz into a non-existent parent directory fails
//  * -mkdir, then -cp of a directory into its own subtree fails while -cp
//    to a sibling name succeeds
// Each shell.run(...) call is wrapped in try/catch with val initialized to
// -1, so an unexpected exception surfaces as a failed exit-code assertion
// instead of aborting the test.  The filesystem is closed and the cluster
// shut down in the finally block.
/** * Tests various options of DFSShell. */ @Test(timeout=120000) public void testDFSShell() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); DistributedFileSystem fileSys=(DistributedFileSystem)fs; FsShell shell=new FsShell(); shell.setConf(conf); try { Path myPath=new Path("/test/mkdirs"); assertTrue(fileSys.mkdirs(myPath)); assertTrue(fileSys.exists(myPath)); assertTrue(fileSys.mkdirs(myPath)); Path myFile=new Path("/test/mkdirs/myFile"); writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); Path myFile2=new Path("/test/mkdirs/myFile2"); writeFile(fileSys,myFile2); assertTrue(fileSys.exists(myFile2)); { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile*"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val == 0); assertFalse(fileSys.exists(myFile)); assertFalse(fileSys.exists(myFile2)); writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); writeFile(fileSys,myFile2); assertTrue(fileSys.exists(myFile2)); } { String[] args=new String[3]; args[0]="-cat"; args[1]="/test/mkdirs/myFile"; args[2]="/test/mkdirs/myFile2"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run: " + StringUtils.stringifyException(e)); } assertTrue(val == 0); } fileSys.delete(myFile2,true); { String[] args=new String[2]; args[0]="-cat"; args[1]="/test/mkdirs/myFile1"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val != 0); } { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile1"; int val=-1; try { val=shell.run(args); } catch ( 
Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val != 0); } { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val == 0); } { String[] args; int val; args=new String[3]; args[0]="-test"; args[1]="-e"; args[2]="/test/mkdirs/noFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args[1]="-z"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args=new String[2]; args[0]="-touchz"; args[1]="/test/mkdirs/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); args=new String[2]; args[0]="-touchz"; args[1]="/test/mkdirs/thisDirNotExists/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args=new String[3]; args[0]="-test"; args[1]="-e"; args[2]="/test/mkdirs/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); args[1]="-d"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args[1]="-z"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[2]; 
args[0]="-mkdir"; args[1]="/test/dir1"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); String[] args1=new String[3]; args1[0]="-cp"; args1[1]="/test/dir1"; args1[2]="/test/dir1/dir2"; val=0; try { val=shell.run(args1); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args1[0]="-cp"; args1[1]="/test/dir1"; args1[2]="/test/dir1foo"; val=-1; try { val=shell.run(args1); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]="/test/mkdirs/noFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]="/test/mkdirs"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]=myFile.toString(); int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]="/test/mkdirs/noFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]="/test/mkdirs/isFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) 
{ System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]=myFile.toString(); int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } } finally { try { fileSys.close(); } catch ( Exception e) { } cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// setfattr/getfattr permission checks for an unprivileged user:
//  * on a 0700 /foo owned by the test's current (super)user, the user's
//    -setfattr must exit 1 and print "Permission denied", while the
//    superuser's own -setfattr exits 0
//  * after chmod 750 (no write access for the user), the user's -getfattr
//    and -setfattr -x must both exit 1 with "Permission denied".
// System.err is redirected to a byte buffer for the message assertions and
// restored (and the cluster shut down) in the finally block.
@Test(timeout=30000) public void testSetXAttrPermission() throws Exception { UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); MiniDFSCluster cluster=null; PrintStream bak=null; try { final Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path p=new Path("/foo"); fs.mkdirs(p); bak=System.err; final FsShell fshell=new FsShell(conf); final ByteArrayOutputStream out=new ByteArrayOutputStream(); System.setErr(new PrintStream(out)); fs.setPermission(p,new FsPermission((short)0700)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 1",1,ret); String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); fs.setPermission(p,new FsPermission((short)0750)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"}); assertEquals("Returned should be 1",1,ret); String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"}); assertEquals("Returned should be 1",1,ret); str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); } finally { if (bak != null) { System.setErr(bak); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A non-owner listing a mode-0700 directory through FsShell must see the
 * RemoteException surfaced as exit code 1 plus a "Permission denied"
 * message on stderr (captured here by swapping System.err for a buffer).
 */
@Test(timeout=30000)
public void testRemoteException() throws Exception {
  final UserGroupInformation otherUser =
      UserGroupInformation.createUserForTesting("tmpname", new String[]{"mygroup"});
  MiniDFSCluster miniCluster = null;
  PrintStream savedErr = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem fs = miniCluster.getFileSystem();
    final Path restricted = new Path("/foo");
    fs.mkdirs(restricted);
    // Owner-only permissions: the test user must be denied access.
    fs.setPermission(restricted, new FsPermission((short) 0700));
    savedErr = System.err;
    otherUser.doAs(new PrivilegedExceptionAction() {
      @Override
      public Object run() throws Exception {
        final FsShell shell = new FsShell(conf);
        final ByteArrayOutputStream captured = new ByteArrayOutputStream();
        System.setErr(new PrintStream(captured));
        final String[] args = {"-ls", "/foo"};
        final int exit = ToolRunner.run(shell, args);
        assertEquals("returned should be 1", 1, exit);
        final String stderrText = captured.toString();
        assertTrue("permission denied printed",
            stderrText.indexOf("Permission denied") != -1);
        captured.reset();
        return null;
      }
    });
  } finally {
    if (savedErr != null) {
      System.setErr(savedErr);
    }
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Zero-length round trip: an empty local file (created and verified to be
// a 0-byte regular file) is copied into HDFS with copyFromLocalFile, then
// copied back out with copyToLocalFile; the returned local file must also
// exist, be a regular file, and have length 0.  Both temp files are
// deleted on success; the DFS handle and cluster are torn down in the
// finally block.
@Test(timeout=30000) public void testZeroSizeFile() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; try { final File f1=new File(TEST_ROOT_DIR,"f1"); assertTrue(!f1.exists()); assertTrue(f1.createNewFile()); assertTrue(f1.exists()); assertTrue(f1.isFile()); assertEquals(0L,f1.length()); final Path root=mkdir(dfs,new Path("/test/zeroSizeFile")); final Path remotef=new Path(root,"dst"); show("copy local " + f1 + " to remote "+ remotef); dfs.copyFromLocalFile(false,false,new Path(f1.getPath()),remotef); show("Block size = " + dfs.getFileStatus(remotef).getBlockSize()); final File f2=new File(TEST_ROOT_DIR,"f2"); assertTrue(!f2.exists()); dfs.copyToLocalFile(remotef,new Path(f2.getPath())); assertTrue(f2.exists()); assertTrue(f2.isFile()); assertEquals(0L,f2.length()); f1.delete(); f2.delete(); } finally { try { dfs.close(); } catch ( Exception e) { } cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Error-output and exit-status matrix for shell commands on bad paths
// (stderr is captured into a byte buffer and reset between cases):
//  * -cat/-rm/-rmr/-du/-dus/-ls on /nonexistentfile: exit 1 and a
//    "No such file or directory"-style message; -cat must NOT dump a raw
//    exception/stack trace, and -ls must not claim "Found 0" items
//  * -ls on an existing empty dir also must not print "Found 0"; -ls on a
//    nonexistent glob exits 1
//  * -mkdir over an existing dir prints "File exists"; over an existing
//    file prints "not a directory"
//  * -mv to a relative name fails; a successful -mv prints no "Renamed"
//    output; -mv of the now-missing source gives a unix-like "No such
//    file or" error
//  * bare -du on the (freshly created) home directory exits 0 without an
//    "empty string" path complaint
//  * "-test -d" on a missing dir exits 1 and prints nothing.
// System.err is restored and the cluster shut down in the finally block.
/** * check command error outputs and exit statuses. */ @Test(timeout=30000) public void testErrOutPut() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; PrintStream bak=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem srcFs=cluster.getFileSystem(); Path root=new Path("/nonexistentfile"); bak=System.err; ByteArrayOutputStream out=new ByteArrayOutputStream(); PrintStream tmp=new PrintStream(out); System.setErr(tmp); String[] argv=new String[2]; argv[0]="-cat"; argv[1]=root.toUri().getPath(); int ret=ToolRunner.run(new FsShell(),argv); assertEquals(" -cat returned 1 ",1,ret); String returned=out.toString(); assertTrue("cat does not print exceptions ",(returned.lastIndexOf("Exception") == -1)); out.reset(); argv[0]="-rm"; argv[1]=root.toString(); FsShell shell=new FsShell(); shell.setConf(conf); ret=ToolRunner.run(shell,argv); assertEquals(" -rm returned 1 ",1,ret); returned=out.toString(); out.reset(); assertTrue("rm prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1)); argv[0]="-rmr"; argv[1]=root.toString(); ret=ToolRunner.run(shell,argv); assertEquals(" -rmr returned 1",1,ret); returned=out.toString(); assertTrue("rmr prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1)); out.reset(); argv[0]="-du"; argv[1]="/nonexistentfile"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue(" -du prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1)); out.reset(); argv[0]="-dus"; argv[1]="/nonexistentfile"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue(" -dus prints reasonable error",(returned.lastIndexOf("No such file or directory") != -1)); out.reset(); argv[0]="-ls"; argv[1]="/nonexistenfile"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue(" -ls does not return Found 0 items",(returned.lastIndexOf("Found 0") == -1)); out.reset(); 
argv[0]="-ls"; argv[1]="/nonexistentfile"; ret=ToolRunner.run(shell,argv); assertEquals(" -lsr should fail ",1,ret); out.reset(); srcFs.mkdirs(new Path("/testdir")); argv[0]="-ls"; argv[1]="/testdir"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue(" -ls does not print out anything ",(returned.lastIndexOf("Found 0") == -1)); out.reset(); argv[0]="-ls"; argv[1]="/user/nonxistant/*"; ret=ToolRunner.run(shell,argv); assertEquals(" -ls on nonexistent glob returns 1",1,ret); out.reset(); argv[0]="-mkdir"; argv[1]="/testdir"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertEquals(" -mkdir returned 1 ",1,ret); assertTrue(" -mkdir returned File exists",(returned.lastIndexOf("File exists") != -1)); Path testFile=new Path("/testfile"); OutputStream outtmp=srcFs.create(testFile); outtmp.write(testFile.toString().getBytes()); outtmp.close(); out.reset(); argv[0]="-mkdir"; argv[1]="/testfile"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertEquals(" -mkdir returned 1",1,ret); assertTrue(" -mkdir returned this is a file ",(returned.lastIndexOf("not a directory") != -1)); out.reset(); argv=new String[3]; argv[0]="-mv"; argv[1]="/testfile"; argv[2]="file"; ret=ToolRunner.run(shell,argv); assertEquals("mv failed to rename",1,ret); out.reset(); argv=new String[3]; argv[0]="-mv"; argv[1]="/testfile"; argv[2]="/testfiletest"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue("no output from rename",(returned.lastIndexOf("Renamed") == -1)); out.reset(); argv[0]="-mv"; argv[1]="/testfile"; argv[2]="/testfiletmp"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertTrue(" unix like output",(returned.lastIndexOf("No such file or") != -1)); out.reset(); argv=new String[1]; argv[0]="-du"; srcFs.mkdirs(srcFs.getHomeDirectory()); ret=ToolRunner.run(shell,argv); returned=out.toString(); assertEquals(" no error ",0,ret); assertTrue("empty path specified",(returned.lastIndexOf("empty string") == -1)); out.reset(); 
argv=new String[3]; argv[0]="-test"; argv[1]="-d"; argv[2]="/no/such/dir"; ret=ToolRunner.run(shell,argv); returned=out.toString(); assertEquals(" -test -d wrong result ",1,ret); assertTrue(returned.isEmpty()); } finally { if (bak != null) { System.setErr(bak); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests "fs -copyToLocal": copies the HDFS tree built by createTree() down to
 * TEST_ROOT_DIR and verifies every expected file/directory arrived, then checks
 * that copying a nonexistent source returns exit code 1 and creates no local file.
 * NOTE(review): run() exceptions are only printed, not rethrown — the subsequent
 * isFile()/isDirectory() asserts are what actually fail the test in that case.
 */
@Test(timeout=30000) public void testCopyToLocal() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); DistributedFileSystem dfs=(DistributedFileSystem)fs; FsShell shell=new FsShell(); shell.setConf(conf); try { String root=createTree(dfs,"copyToLocal"); { try { assertEquals(0,runCmd(shell,"-copyToLocal",root + "*",TEST_ROOT_DIR)); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } File localroot=new File(TEST_ROOT_DIR,"copyToLocal"); File localroot2=new File(TEST_ROOT_DIR,"copyToLocal2"); File f1=new File(localroot,"f1"); assertTrue("Copying failed.",f1.isFile()); File f2=new File(localroot,"f2"); assertTrue("Copying failed.",f2.isFile()); File sub=new File(localroot,"sub"); assertTrue("Copying failed.",sub.isDirectory()); File f3=new File(sub,"f3"); assertTrue("Copying failed.",f3.isFile()); File f4=new File(sub,"f4"); assertTrue("Copying failed.",f4.isFile()); File f5=new File(localroot2,"f1"); assertTrue("Copying failed.",f5.isFile()); f1.delete(); f2.delete(); f3.delete(); f4.delete(); f5.delete(); sub.delete(); } { String[] args={"-copyToLocal","nosuchfile",TEST_ROOT_DIR}; try { assertEquals(1,shell.run(args)); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } File f6=new File(TEST_ROOT_DIR,"nosuchfile"); assertTrue(!f6.exists()); } } finally { try { dfs.close(); } catch ( Exception e) { } cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the "fs -cp" preserve flags on a DIRECTORY source:
 * -p / -ptop preserve timestamps, ownership and permission but neither xattrs
 * nor ACLs; -ptopx additionally preserves the two xattrs set on the source;
 * -ptopa / -ptoa additionally preserve the ACL entries and the ACL bit.
 * The source dir carries a default ACL entry and a sticky-bit permission.
 * NOTE(review): some assertions pass (actual, expected) in reversed order,
 * e.g. assertEquals(xattrs.size(),2) — outcome is unaffected but failure
 * messages are misleading.
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception { Configuration conf=new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); FsShell shell=null; FileSystem fs=null; final String testdir="/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement(); final Path hdfsTestDir=new Path(testdir); try { fs=cluster.getFileSystem(); fs.mkdirs(hdfsTestDir); Path srcDir=new Path(hdfsTestDir,"srcDir"); fs.mkdirs(srcDir); fs.setAcl(srcDir,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE))); fs.setPermission(srcDir,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true)); Path srcFile=new Path(srcDir,"srcFile"); fs.create(srcFile).close(); FileStatus status=fs.getFileStatus(srcDir); final long mtime=status.getModificationTime(); final long atime=status.getAccessTime(); final String owner=status.getOwner(); final String group=status.getGroup(); final FsPermission perm=status.getPermission(); fs.setXAttr(srcDir,USER_A1,USER_A1_VALUE); fs.setXAttr(srcDir,TRUSTED_A1,TRUSTED_A1_VALUE); shell=new FsShell(conf); Path targetDir1=new Path(hdfsTestDir,"targetDir1"); String[] argv=new String[]{"-cp","-p",srcDir.toUri().toString(),targetDir1.toUri().toString()}; int ret=ToolRunner.run(shell,argv); assertEquals("cp -p is not working",SUCCESS,ret); FileStatus targetStatus=fs.getFileStatus(targetDir1); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); FsPermission targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); Map 
xattrs=fs.getXAttrs(targetDir1); assertTrue(xattrs.isEmpty()); List acls=fs.getAclStatus(targetDir1).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path targetDir2=new Path(hdfsTestDir,"targetDir2"); argv=new String[]{"-cp","-ptop",srcDir.toUri().toString(),targetDir2.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptop is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(targetDir2); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(targetDir2); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(targetDir2).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path targetDir3=new Path(hdfsTestDir,"targetDir3"); argv=new String[]{"-cp","-ptopx",srcDir.toUri().toString(),targetDir3.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptopx is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(targetDir3); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(targetDir3); assertEquals(xattrs.size(),2); assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1)); assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1)); acls=fs.getAclStatus(targetDir3).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path targetDir4=new Path(hdfsTestDir,"targetDir4"); argv=new String[]{"-cp","-ptopa",srcDir.toUri().toString(),targetDir4.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptopa is not working",SUCCESS,ret); 
targetStatus=fs.getFileStatus(targetDir4); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(targetDir4); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(targetDir4).getEntries(); assertFalse(acls.isEmpty()); assertTrue(targetPerm.getAclBit()); assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir4)); Path targetDir5=new Path(hdfsTestDir,"targetDir5"); argv=new String[]{"-cp","-ptoa",srcDir.toUri().toString(),targetDir5.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptoa is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(targetDir5); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(targetDir5); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(targetDir5).getEntries(); assertFalse(acls.isEmpty()); assertTrue(targetPerm.getAclBit()); assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir5)); } finally { if (shell != null) { shell.close(); } if (fs != null) { fs.delete(hdfsTestDir,true); fs.close(); } cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that "fs -cp -p" on a FILE does NOT carry over ACL entries or the
 * ACL bit, while "-cp -ptopa" preserves timestamps, ownership, permission
 * (including the sticky bit set on the source) and the full ACL.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception { Configuration conf=new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); FsShell shell=null; FileSystem fs=null; final String testdir="/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement(); final Path hdfsTestDir=new Path(testdir); try { fs=cluster.getFileSystem(); fs.mkdirs(hdfsTestDir); Path src=new Path(hdfsTestDir,"srcfile"); fs.create(src).close(); fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE))); fs.setPermission(src,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true)); FileStatus status=fs.getFileStatus(src); final long mtime=status.getModificationTime(); final long atime=status.getAccessTime(); final String owner=status.getOwner(); final String group=status.getGroup(); final FsPermission perm=status.getPermission(); shell=new FsShell(conf); Path target1=new Path(hdfsTestDir,"targetfile1"); String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()}; int ret=ToolRunner.run(shell,argv); assertEquals("cp is not working",SUCCESS,ret); FileStatus targetStatus=fs.getFileStatus(target1); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); FsPermission targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); List acls=fs.getAclStatus(target1).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path target2=new Path(hdfsTestDir,"targetfile2"); argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target2.toUri().toString()}; 
ret=ToolRunner.run(shell,argv); assertEquals("cp -ptopa is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(target2); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); acls=fs.getAclStatus(target2).getEntries(); assertFalse(acls.isEmpty()); assertTrue(targetPerm.getAclBit()); assertEquals(fs.getAclStatus(src),fs.getAclStatus(target2)); } finally { if (null != shell) { shell.close(); } if (null != fs) { fs.delete(hdfsTestDir,true); fs.close(); } cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Error paths of "fs -getfattr": after an xattr is set by the superuser on a
 * 0700 directory, (1) a different user must not see the xattr value, and
 * (2) requesting a nonexistent xattr must print the
 * "At least one of the attributes provided was not found" error.
 * stderr is captured into a ByteArrayOutputStream and restored in finally.
 */
@Test(timeout=120000) public void testGetFAttrErrors() throws Exception { final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); MiniDFSCluster cluster=null; PrintStream bakErr=null; try { final Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final FileSystem fs=cluster.getFileSystem(); final Path p=new Path("/foo"); fs.mkdirs(p); bakErr=System.err; final FsShell fshell=new FsShell(conf); final ByteArrayOutputStream out=new ByteArrayOutputStream(); System.setErr(new PrintStream(out)); fs.setPermission(p,new FsPermission((short)0700)); { final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); } user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"}); String str=out.toString(); assertTrue("xattr value was incorrectly returned",str.indexOf("1234") == -1); out.reset(); return null; } } ); { final int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.nonexistent","/foo"}); String str=out.toString(); assertTrue("xattr value was incorrectly returned",str.indexOf("getfattr: At least one of the attributes provided was not found") >= 0); out.reset(); } } finally { if (bakErr != null) { System.setErr(bakErr); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the "fs -cp" preserve flags on a FILE source:
 * -p / -ptop preserve timestamps, ownership and permission but neither xattrs
 * nor ACLs; -ptopx additionally preserves the two xattrs set on the source;
 * -ptopa / -ptoa additionally preserve the ACL entries and the ACL bit.
 * NOTE(review): assertEquals(xattrs.size(),2) passes (actual, expected) in
 * reversed order — outcome is unaffected but failure messages are misleading.
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception { Configuration conf=new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); FsShell shell=null; FileSystem fs=null; final String testdir="/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement(); final Path hdfsTestDir=new Path(testdir); try { fs=cluster.getFileSystem(); fs.mkdirs(hdfsTestDir); Path src=new Path(hdfsTestDir,"srcfile"); fs.create(src).close(); fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE))); FileStatus status=fs.getFileStatus(src); final long mtime=status.getModificationTime(); final long atime=status.getAccessTime(); final String owner=status.getOwner(); final String group=status.getGroup(); final FsPermission perm=status.getPermission(); fs.setXAttr(src,USER_A1,USER_A1_VALUE); fs.setXAttr(src,TRUSTED_A1,TRUSTED_A1_VALUE); shell=new FsShell(conf); Path target1=new Path(hdfsTestDir,"targetfile1"); String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()}; int ret=ToolRunner.run(shell,argv); assertEquals("cp -p is not working",SUCCESS,ret); FileStatus targetStatus=fs.getFileStatus(target1); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); FsPermission targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); Map xattrs=fs.getXAttrs(target1); assertTrue(xattrs.isEmpty()); List acls=fs.getAclStatus(target1).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path target2=new 
Path(hdfsTestDir,"targetfile2"); argv=new String[]{"-cp","-ptop",src.toUri().toString(),target2.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptop is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(target2); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(target2); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(target2).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path target3=new Path(hdfsTestDir,"targetfile3"); argv=new String[]{"-cp","-ptopx",src.toUri().toString(),target3.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptopx is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(target3); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(target3); assertEquals(xattrs.size(),2); assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1)); assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1)); acls=fs.getAclStatus(target3).getEntries(); assertTrue(acls.isEmpty()); assertFalse(targetPerm.getAclBit()); Path target4=new Path(hdfsTestDir,"targetfile4"); argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target4.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptopa is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(target4); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); 
targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(target4); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(target4).getEntries(); assertFalse(acls.isEmpty()); assertTrue(targetPerm.getAclBit()); assertEquals(fs.getAclStatus(src),fs.getAclStatus(target4)); Path target5=new Path(hdfsTestDir,"targetfile5"); argv=new String[]{"-cp","-ptoa",src.toUri().toString(),target5.toUri().toString()}; ret=ToolRunner.run(shell,argv); assertEquals("cp -ptoa is not working",SUCCESS,ret); targetStatus=fs.getFileStatus(target5); assertEquals(mtime,targetStatus.getModificationTime()); assertEquals(atime,targetStatus.getAccessTime()); assertEquals(owner,targetStatus.getOwner()); assertEquals(group,targetStatus.getGroup()); targetPerm=targetStatus.getPermission(); assertTrue(perm.equals(targetPerm)); xattrs=fs.getXAttrs(target5); assertTrue(xattrs.isEmpty()); acls=fs.getAclStatus(target5).getEntries(); assertFalse(acls.isEmpty()); assertTrue(targetPerm.getAclBit()); assertEquals(fs.getAclStatus(src),fs.getAclStatus(target5)); } finally { if (null != shell) { shell.close(); } if (null != fs) { fs.delete(hdfsTestDir,true); fs.close(); } cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestDFSStartupVersions

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * This test ensures the appropriate response (successful or failure) from * a Datanode when the system is started with differing version combinations. *
 * For each 3-tuple in the cross product
 * ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 * {currentNamespaceId,incorrectNamespaceId},
 * {pastFsscTime,currentFsscTime,futureFsscTime})
 * 1. Startup Namenode with version file containing 
 * (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 * 2. Attempt to startup Datanode with version file containing 
 * this iterations version 3-tuple
 * 3. Assert the DataNode is up if and only if isVersionCompatible()
 * declares its 3-tuple compatible with the NameNode's version info.
 * 
*/ @Test(timeout=300000) public void testVersions() throws Exception { UpgradeUtilities.initialize(); Configuration conf=UpgradeUtilities.initializeStorageStateConf(1,new HdfsConfiguration()); StorageData[] versions=initializeVersions(); UpgradeUtilities.createNameNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),"current"); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build(); StorageData nameNodeVersion=new StorageData(HdfsConstants.NAMENODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),UpgradeUtilities.getCurrentBlockPoolID(cluster)); log("NameNode version info",NAME_NODE,null,nameNodeVersion); String bpid=UpgradeUtilities.getCurrentBlockPoolID(cluster); for (int i=0; i < versions.length; i++) { File[] storage=UpgradeUtilities.createDataNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),"current"); log("DataNode version info",DATA_NODE,i,versions[i]); UpgradeUtilities.createDataNodeVersionFile(storage,versions[i].storageInfo,bpid,versions[i].blockPoolId); try { cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null); } catch ( Exception ignore) { } assertTrue(cluster.getNameNode() != null); assertEquals(isVersionCompatible(nameNodeVersion,versions[i]),cluster.isDataNodeUp()); cluster.shutdownDataNodes(); } }

Class: org.apache.hadoop.hdfs.TestDFSUpgrade

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * Upgrading from a layout version older than
 * {@code Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION} must be rejected:
 * checkVersionUpgradable is expected to throw IOException, which is
 * asserted via the {@code expected} attribute of {@code @Test}.
 *
 * @throws IOException the expected outcome of the call under test
 */
@Test(expected=IOException.class)
public void testUpgradeFromPreUpgradeLVFails() throws IOException {
  // Layout versions are negative, so "+ 1" is one version older than the
  // last pre-upgrade version — too old to be upgradable; this must throw.
  Storage.checkVersionUpgradable(Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION + 1);
  // No explicit fail() here: with @Test(expected=IOException.class) JUnit
  // already fails the test — with a clear "Expected exception" message — if
  // no IOException is thrown. A fail() would surface as a confusing
  // "unexpected AssertionError" instead.
}

IterativeVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * This test attempts to upgrade the NameNode and DataNode under a number of
 * valid and invalid conditions: normal NN/DN upgrade, pre-existing "previous"
 * directories, future (Integer.MIN_VALUE) and too-old stored layout versions,
 * a newer fsscTime on the DataNode, missing edits/fsimage files, and a corrupt
 * VERSION file. Every scenario is exercised with 1 and then 2 storage dirs;
 * a final 4-dir pass re-checks the normal upgrade with parallel image writes.
 * Each scenario cleans up its storage dirs before the next one starts.
 */ @Test(timeout=60000) public void testUpgrade() throws Exception { File[] baseDirs; StorageInfo storageInfo=null; for (int numDirs=1; numDirs <= 2; numDirs++) { conf=new HdfsConfiguration(); conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf); String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY); String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY); log("Normal NameNode upgrade",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); try { final DistributedFileSystem dfs=cluster.getFileSystem(); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); fail(); } catch ( RemoteException re) { assertEquals(InconsistentFSStateException.class.getName(),re.getClassName()); LOG.info("The exception is expected.",re); } checkNameNode(nameNodeDirs,EXPECTED_TXID); if (numDirs > 1) TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("Normal DataNode upgrade",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current"); cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null); checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null)); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); log("NameNode upgrade with existing previous dir",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous"); startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("DataNode upgrade with existing previous dir",numDirs); 
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current"); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous"); cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null); checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null)); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); log("DataNode upgrade with future stored layout version in current",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current"); storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE); UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster)); startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null)); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); log("DataNode upgrade with newer fsscTime in current",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current"); storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE); UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster)); startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null)); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); 
UpgradeUtilities.createEmptyDirs(dataNodeDirs); log("NameNode upgrade with no edits file",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); deleteStorageFilesWithPrefix(nameNodeDirs,"edits_"); startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode upgrade with no image file",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); deleteStorageFilesWithPrefix(nameNodeDirs,"fsimage_"); startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode upgrade with corrupt version file",numDirs); baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); for ( File f : baseDirs) { UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8)); } startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode upgrade with old layout version in current",numDirs); baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); storageInfo=new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE); UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster)); startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode upgrade with future layout version in current",numDirs); baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE); 
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster)); startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); } int numDirs=4; { conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1); conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf); String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY); log("Normal NameNode upgrade",numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current"); cluster=createCluster(); try { final DistributedFileSystem dfs=cluster.getFileSystem(); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); fail(); } catch ( RemoteException re) { assertEquals(InconsistentFSStateException.class.getName(),re.getClassName()); LOG.info("The exception is expected.",re); } checkNameNode(nameNodeDirs,EXPECTED_TXID); TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); } }

BooleanVerifier IgnoredMethod HybridVerifier 
/**
 * Verifies that every layout version listed in
 * {@code Storage.LAYOUT_VERSIONS_203} is recognized by
 * {@code Storage.is203LayoutVersion()}. Currently disabled.
 */
@Test
@Ignore
public void test203LayoutVersion() {
  // Without @Test the JUnit 4 runner never considers this method at all, so
  // the @Ignore was inert and removing it would still not enable the test.
  // With @Test present the method is reported as skipped and can be
  // re-enabled simply by deleting the @Ignore annotation.
  for (int lv : Storage.LAYOUT_VERSIONS_203) {
    assertTrue(Storage.is203LayoutVersion(lv));
  }
}

Class: org.apache.hadoop.hdfs.TestDFSUpgradeFromImage

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
 * paths to test renaming on upgrade. First the upgrade is attempted without
 * rename pairs (expected to fail with "reserved path component in this
 * version"), then with setRenameReservedPairs the renamed tree is walked and
 * compared against the expected paths, both before and after finalize+restart.
 * NOTE(review): the first try block has no fail() after the cluster build, so
 * an upgrade that wrongly succeeds would go unnoticed. Also the final
 * assertEquals passes (actual, expected) in reversed order.
 */ @Test public void testUpgradeFromRel2ReservedImage() throws Exception { unpackStorage(HADOOP2_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster=null; final Configuration conf=new Configuration(); try { cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("reserved path component in this version",e); } finally { if (cluster != null) { cluster.shutdown(); } } try { FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved"); cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); DistributedFileSystem dfs=cluster.getFileSystem(); final String[] expected=new String[]{"/edits","/edits/.reserved","/edits/.user-snapshot","/edits/.user-snapshot/editsdir","/edits/.user-snapshot/editsdir/editscontents","/edits/.user-snapshot/editsdir/editsdir2","/image","/image/.reserved","/image/.user-snapshot","/image/.user-snapshot/imagedir","/image/.user-snapshot/imagedir/imagecontents","/image/.user-snapshot/imagedir/imagedir2","/.my-reserved","/.my-reserved/edits-touch","/.my-reserved/image-touch"}; for (int i=0; i < 2; i++) { if (i == 1) { cluster.finalizeCluster(conf); cluster.restartNameNode(true); } ArrayList toList=new ArrayList(); toList.add(new Path("/")); ArrayList found=new ArrayList(); while (!toList.isEmpty()) { Path p=toList.remove(0); FileStatus[] statuses=dfs.listStatus(p); for ( FileStatus status : statuses) { final String path=status.getPath().toUri().getPath(); System.out.println("Found path " + path); found.add(path); if (status.isDirectory()) { toList.add(status.getPath()); } } } for ( String s : expected) { assertTrue("Did not find expected path " + s,found.contains(s)); } assertEquals("Found an unexpected 
path while listing filesystem",found.size(),expected.length); } } finally { if (cluster != null) { cluster.shutdown(); } } }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from a branch-1.2 image with reserved paths: ".snapshot" and
 * ".reserved" components are renamed via setRenameReservedPairs, then the
 * whole namespace is walked breadth-first and compared against the expected
 * renamed paths, both before and after finalize+restart.
 */ @Test public void testUpgradeFromRel1ReservedImage() throws Exception { unpackStorage(HADOOP1_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster=null; final Configuration conf=new Configuration(); try { FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved"); cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); DistributedFileSystem dfs=cluster.getFileSystem(); final String[] expected=new String[]{"/.my-reserved","/.user-snapshot","/.user-snapshot/.user-snapshot","/.user-snapshot/open","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot","/user","/user/andrew","/user/andrew/.user-snapshot"}; for (int i=0; i < 2; i++) { if (i == 1) { cluster.finalizeCluster(conf); cluster.restartNameNode(true); } ArrayList toList=new ArrayList(); toList.add(new Path("/")); ArrayList found=new ArrayList(); while (!toList.isEmpty()) { Path p=toList.remove(0); FileStatus[] statuses=dfs.listStatus(p); for ( FileStatus status : statuses) { final String path=status.getPath().toUri().getPath(); System.out.println("Found path " + path); found.add(path); if (status.isDirectory()) { toList.add(status.getPath()); } } } for ( String s : expected) { assertTrue("Did not find expected path " + s,found.contains(s)); } assertEquals("Found an unexpected path while listing filesystem",found.size(),expected.length); } } finally { if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that sets up a fake image from Hadoop 0.3.0 (a pre-upgrade layout
 * version, written here as raw hex bytes into image/fsimage) and tries to
 * start a NN, verifying that startup fails and the error message contains
 * "Old layout version is 'too old'".
 */ @Test public void testFailOnPreUpgradeImage() throws IOException { Configuration conf=new HdfsConfiguration(); File namenodeStorage=new File(TEST_ROOT_DIR,"nnimage-0.3.0"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,namenodeStorage.toString()); FileUtil.fullyDelete(namenodeStorage); assertTrue("Make " + namenodeStorage,namenodeStorage.mkdirs()); File imageDir=new File(namenodeStorage,"image"); assertTrue("Make " + imageDir,imageDir.mkdirs()); File imageFile=new File(imageDir,"fsimage"); byte[] imageBytes=StringUtils.hexStringToByte("fffffffee17c0d2700000000"); FileOutputStream fos=new FileOutputStream(imageFile); try { fos.write(imageBytes); } finally { fos.close(); } MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build(); fail("Was able to start NN from 0.3.0 image"); } catch ( IOException ioe) { if (!ioe.toString().contains("Old layout version is 'too old'")) { throw ioe; } } finally { if (cluster != null) { cluster.shutdown(); } } }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from a 0.23.11 image with reserved paths: ".snapshot"
 * components must be renamed per setRenameReservedPairs; the namespace is
 * walked breadth-first and compared against the expected renamed paths,
 * both before and after finalize+restart.
 */ @Test public void testUpgradeFromRel023ReservedImage() throws Exception { unpackStorage(HADOOP023_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster=null; final Configuration conf=new Configuration(); try { FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved"); cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); DistributedFileSystem dfs=cluster.getFileSystem(); final String[] expected=new String[]{"/.user-snapshot","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot"}; for (int i=0; i < 2; i++) { if (i == 1) { cluster.finalizeCluster(conf); cluster.restartNameNode(true); } ArrayList toList=new ArrayList(); toList.add(new Path("/")); ArrayList found=new ArrayList(); while (!toList.isEmpty()) { Path p=toList.remove(0); FileStatus[] statuses=dfs.listStatus(p); for ( FileStatus status : statuses) { final String path=status.getPath().toUri().getPath(); System.out.println("Found path " + path); found.add(path); if (status.isDirectory()) { toList.add(status.getPath()); } } } for ( String s : expected) { assertTrue("Did not find expected path " + s,found.contains(s)); } assertEquals("Found an unexpected path while listing filesystem",found.size(),expected.length); } } finally { if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from 0.22 image with corrupt md5, make sure it fails to
 * upgrade: both VERSION files get a bogus imageMD5Digest, the upgrade must
 * throw an IOException containing "Failed to load an FSImage file", and a
 * log appender verifies exactly one MD5-checksum corruption was logged.
 */ @Test public void testUpgradeFromCorruptRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE,HADOOP_DFS_DIR_TXT); File baseDir=new File(MiniDFSCluster.getBaseDirectory()); FSImageTestUtil.corruptVersionFile(new File(baseDir,"name1/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222"); FSImageTestUtil.corruptVersionFile(new File(baseDir,"name2/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222"); final LogVerificationAppender appender=new LogVerificationAppender(); final Logger logger=Logger.getRootLogger(); logger.addAppender(appender); try { upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4)); fail("Upgrade did not fail with bad MD5"); } catch ( IOException ioe) { String msg=StringUtils.stringifyException(ioe); if (!msg.contains("Failed to load an FSImage file")) { throw ioe; } int md5failures=appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of "); assertEquals("Upgrade did not fail with bad MD5",1,md5failures); } }

Class: org.apache.hadoop.hdfs.TestDFSUtil

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA
 * nor Federation is enabled.
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration config = new HdfsConfiguration();
  config.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
  // Before generic-key initialization, fs.defaultFS must not yet reflect
  // the configured RPC address.
  assertFalse("hdfs://localhost:1234".equals(config.get(FS_DEFAULT_NAME_KEY)));
  // Neither a nameservice id nor a namenode id is configured for this case.
  final String nameserviceId = null;
  final String namenodeId = null;
  NameNode.initializeGenericKeys(config, nameserviceId, namenodeId);
  // Afterwards fs.defaultFS is derived from the RPC address.
  assertEquals("hdfs://localhost:1234", config.get(FS_DEFAULT_NAME_KEY));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): exercises DFSUtil.getNameServiceUris() under four setups:
// (1) federated HA ns1 + service-RPC ns2 + standalone RPC + default FS → 4 URIs;
// (2) a viewfs default FS is excluded → 3 URIs; (3) a default FS that duplicates
// a nameservice is deduplicated → 3 URIs; (4) a fresh conf where the service
// RPC address takes precedence over default FS and RPC address → 1 URI.
@Test public void testGetNNUris() throws Exception { HdfsConfiguration conf=new HdfsConfiguration(); final String NS1_NN1_ADDR="ns1-nn1.example.com:8020"; final String NS1_NN2_ADDR="ns1-nn2.example.com:8020"; final String NS2_NN_ADDR="ns2-nn.example.com:8020"; final String NN1_ADDR="nn.example.com:8020"; final String NN1_SRVC_ADDR="nn.example.com:8021"; final String NN2_ADDR="nn2.example.com:8020"; conf.set(DFS_NAMESERVICES,"ns1,ns2"); conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2"); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),NS1_NN1_ADDR); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),NS1_NN2_ADDR); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"ns2"),NS2_NN_ADDR); conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"hdfs://" + NN1_ADDR); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN2_ADDR); Collection uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(4,uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR))); assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR))); assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR))); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"viewfs://vfs-name.example.com"); uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(3,uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR))); assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR))); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1"); uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(3,uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR))); 
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR))); conf=new HdfsConfiguration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN1_ADDR); conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,NN1_ADDR); conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,NN1_SRVC_ADDR); uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(1,uris.size()); assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR))); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getOnlyNameServiceIdOrNull() returns the nameservice id only when exactly
 * one nameservice is configured; otherwise it returns null.
 */
@Test
public void testGetOnlyNameServiceIdOrNull() {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Ambiguous: two nameservices configured.
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
  // An empty value behaves like "not configured".
  conf.set(DFS_NAMESERVICES, "");
  assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
  // Exactly one nameservice: its id is returned.
  conf.set(DFS_NAMESERVICES, "ns1");
  assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
}

UtilityVerifier ExceptionVerifier HybridVerifier 
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure an
 * exception is thrown when multiple RPC addresses match the local node's
 * address.
 */
@Test(expected = HadoopIllegalArgumentException.class)
public void testGetNameServiceIdException() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  // Two nameservices whose RPC addresses both resolve to localhost.
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
      "localhost:9000");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
      "localhost:9001");
  // Must throw HadoopIllegalArgumentException (checked via @Test(expected=...)).
  DFSUtil.getNamenodeNameServiceId(conf);
  fail("Expected exception is not thrown");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): federated HA setup with two nameservices (ns1, ns2), each with
// two NNs. Verifies isHAEnabled per nameservice, the full RPC-address map, the
// per-NN service addresses, the null results when ids cannot be resolved, and
// that getNameServiceUris yields one logical URI per nameservice. Generic type
// parameters on Map/Collection were stripped by extraction ("Map>").
@Test public void testHANameNodesWithFederation() throws URISyntaxException { HdfsConfiguration conf=new HdfsConfiguration(); final String NS1_NN1_HOST="ns1-nn1.example.com:8020"; final String NS1_NN2_HOST="ns1-nn2.example.com:8020"; final String NS2_NN1_HOST="ns2-nn1.example.com:8020"; final String NS2_NN2_HOST="ns2-nn2.example.com:8020"; conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1"); conf.set(DFS_NAMESERVICES,"ns1,ns2"); conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"ns1-nn1,ns1-nn2"); conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns2"),"ns2-nn1,ns2-nn2"); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn1"),NS1_NN1_HOST); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn2"),NS1_NN2_HOST); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn1"),NS2_NN1_HOST); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn2"),NS2_NN2_HOST); Map> map=DFSUtil.getHaNnRpcAddresses(conf); assertTrue(HAUtil.isHAEnabled(conf,"ns1")); assertTrue(HAUtil.isHAEnabled(conf,"ns2")); assertFalse(HAUtil.isHAEnabled(conf,"ns3")); assertEquals(NS1_NN1_HOST,map.get("ns1").get("ns1-nn1").toString()); assertEquals(NS1_NN2_HOST,map.get("ns1").get("ns1-nn2").toString()); assertEquals(NS2_NN1_HOST,map.get("ns2").get("ns2-nn1").toString()); assertEquals(NS2_NN2_HOST,map.get("ns2").get("ns2-nn2").toString()); assertEquals(NS1_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn1")); assertEquals(NS1_NN2_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn2")); assertEquals(NS2_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns2","ns2-nn1")); assertEquals(null,DFSUtil.getNamenodeServiceAddr(conf,null,"ns1-nn1")); assertEquals(null,DFSUtil.getNamenodeNameServiceId(conf)); assertEquals(null,DFSUtil.getSecondaryNameServiceId(conf)); Collection uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(2,uris.size()); 
assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://ns2"))); }

APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
/**
 * getNameServiceUris() must not return a URI whose host is still the raw
 * loopback address — the reverse lookup should have replaced it.
 */
@Test(timeout = 15000)
public void testLocalhostReverseLookup() {
  // Reverse-lookup behavior differs on Windows, so skip there.
  Assume.assumeTrue(!Shell.WINDOWS);
  final HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
  final Collection<URI> nameServiceUris = DFSUtil.getNameServiceUris(conf);
  assertEquals(1, nameServiceUris.size());
  for (final URI uri : nameServiceUris) {
    assertThat(uri.getHost(), not("127.0.0.1"));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test conversion of LocatedBlock to BlockLocation: two blocks (one marked
 * corrupt) must map to two locations with exactly one corrupt entry, and an
 * empty LocatedBlocks must map to an empty array.
 */
@Test
public void testLocatedBlocks2Locations() {
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[] {d};
  // One healthy block ...
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
  // ... and one flagged as corrupt.
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
  // Fix: assertEquals reports expected vs. actual on failure, unlike
  // assertTrue on a hand-built "==" expression.
  assertEquals("wrong number of block locations", 2, bs.length);
  int corruptCount = 0;
  for (BlockLocation b : bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }
  assertEquals("wrong number of corrupt blocks", 1, corruptCount);
  // An empty LocatedBlocks converts to an empty BlockLocation array.
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): configures two non-HA nameservices (nn1 at :9000, nn2 at :9001),
// verifies getNNServiceRpcAddresses returns one address per nameservice (keyed
// by a null NN-id since HA is off), resolves nameservice ids back from the
// addresses (null for the unconfigured :9002), and confirms HA is disabled.
/** * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}{@link DFSUtil#getNameServiceIdFromAddress(Configuration,InetSocketAddress,String)(Configuration)} */ @Test public void testMultipleNamenodes() throws IOException { HdfsConfiguration conf=new HdfsConfiguration(); conf.set(DFS_NAMESERVICES,"nn1,nn2"); final String NN1_ADDRESS="localhost:9000"; final String NN2_ADDRESS="localhost:9001"; final String NN3_ADDRESS="localhost:9002"; conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn1"),NN1_ADDRESS); conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn2"),NN2_ADDRESS); Map> nnMap=DFSUtil.getNNServiceRpcAddresses(conf); assertEquals(2,nnMap.size()); Map nn1Map=nnMap.get("nn1"); assertEquals(1,nn1Map.size()); InetSocketAddress addr=nn1Map.get(null); assertEquals("localhost",addr.getHostName()); assertEquals(9000,addr.getPort()); Map nn2Map=nnMap.get("nn2"); assertEquals(1,nn2Map.size()); addr=nn2Map.get(null); assertEquals("localhost",addr.getHostName()); assertEquals(9001,addr.getPort()); checkNameServiceId(conf,NN1_ADDRESS,"nn1"); checkNameServiceId(conf,NN2_ADDRESS,"nn2"); checkNameServiceId(conf,NN3_ADDRESS,null); assertFalse(HAUtil.isHAEnabled(conf,"nn1")); assertFalse(HAUtil.isHAEnabled(conf,"nn2")); }

Class: org.apache.hadoop.hdfs.TestDataTransferKeepalive

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the client respects its keepalive timeout: a cached peer must be
 * evicted from the PeerCache once the (very short) client expiry elapses.
 */
@Test(timeout = 30000)
public void testClientResponsesKeepAliveTimeout() throws Exception {
  Configuration clientConf = new Configuration(conf);
  // Expiry far shorter than the test timeout so eviction happens quickly.
  final long CLIENT_EXPIRY_MS = 10L;
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  clientConf.set(DFS_CLIENT_CONTEXT, "testClientResponsesKeepAliveTimeout");
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short) 1, 0L);
  // Nothing cached and no xceivers before the first read.
  assertEquals(0, peerCache.size());
  assertXceiverCount(0);
  // A read caches exactly one peer and leaves one xceiver running.
  DFSTestUtil.readFile(fs, TEST_FILE);
  assertEquals(1, peerCache.size());
  assertXceiverCount(1);
  // After the expiry window the cached peer must be gone.
  Thread.sleep(CLIENT_EXPIRY_MS + 1);
  Peer peer = peerCache.get(dn.getDatanodeId(), false);
  // Fix: assertNull instead of assertTrue(peer == null) — reports the
  // unexpected value on failure.
  assertNull(peer);
  assertEquals(0, peerCache.size());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): mirror of the client-side test — here the CLIENT expiry is long
// (60s) so only the DATANODE keepalive can close the connection. After sleeping
// past DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT the xceiver must be gone,
// yet the client-side cached peer remains; reading from it returns -1 (EOF)
// because the DN closed its end.
/** * Regression test for HDFS-3357. Check that the datanode is respecting * its configured keepalive timeout. */ @Test(timeout=30000) public void testDatanodeRespectsKeepAliveTimeout() throws Exception { Configuration clientConf=new Configuration(conf); final long CLIENT_EXPIRY_MS=60000L; clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS); clientConf.set(DFS_CLIENT_CONTEXT,"testDatanodeRespectsKeepAliveTimeout"); DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf); PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache(); DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L); assertEquals(0,peerCache.size()); assertXceiverCount(0); DFSTestUtil.readFile(fs,TEST_FILE); assertEquals(1,peerCache.size()); assertXceiverCount(1); Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1); assertXceiverCount(0); assertEquals(1,peerCache.size()); Peer peer=peerCache.get(dn.getDatanodeId(),false); assertNotNull(peer); assertEquals(-1,peer.getInputStream().read()); }

Class: org.apache.hadoop.hdfs.TestDataTransferProtocol

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a PacketHeader through both serialization paths (stream-based
 * and ByteBuffer-based) and exercise its sequence-number sanity check.
 */
@Test
public void testPacketHeader() throws IOException {
  PacketHeader header = new PacketHeader(4, 1024, 100, false, 4096, false);
  // Serialize via the DataOutputStream path.
  ByteArrayOutputStream serialized = new ByteArrayOutputStream();
  header.write(new DataOutputStream(serialized));
  // Deserialize through the DataInputStream path and compare.
  PacketHeader streamCopy = new PacketHeader();
  ByteArrayInputStream byteSource =
      new ByteArrayInputStream(serialized.toByteArray());
  streamCopy.readFields(new DataInputStream(byteSource));
  assertEquals(header, streamCopy);
  // Same round trip through the ByteBuffer-based readFields.
  PacketHeader bufferCopy = new PacketHeader();
  bufferCopy.readFields(ByteBuffer.wrap(serialized.toByteArray()));
  assertEquals(header, bufferCopy);
  // sanityCheck(lastSeqNo): 99 precedes this header's seqno (100), 100 does not.
  assertTrue(header.sanityCheck(99));
  assertFalse(header.sanityCheck(100));
}

Class: org.apache.hadoop.hdfs.TestDatanodeBlockScanner

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): timing-sensitive block-scanner test — creates /test1../test9,
// waits for each verification, and asserts the previous file's scan time did
// not change (no duplicate scan). After a DN restart (plus a fixed 10s sleep
// for the scanner to reload its log) the latest scan time of /test9 must also
// be unchanged. The sleep/ordering is load-bearing; do not reorder.
@Test public void testDuplicateScans() throws Exception { long startTime=Time.monotonicNow(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build(); FileSystem fs=null; try { fs=cluster.getFileSystem(); DataNode dataNode=cluster.getDataNodes().get(0); int infoPort=dataNode.getInfoPort(); long scanTimeBefore=0, scanTimeAfter=0; for (int i=1; i < 10; i++) { Path fileName=new Path("/test" + i); DFSTestUtil.createFile(fs,fileName,1024,(short)1,1000L); waitForVerification(infoPort,fs,fileName,i,startTime,TIMEOUT); if (i > 1) { scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (i - 1)))); assertFalse("scan time shoud not be 0",scanTimeAfter == 0); assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter); } scanTimeBefore=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + i))); } cluster.restartDataNode(0); Thread.sleep(10000); dataNode=cluster.getDataNodes().get(0); scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (9)))); assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter); } finally { IOUtils.closeStream(fs); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestDatanodeConfig

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// NOTE(review): the empty catch is intentional — any exception from
// DataNode.createDataNode() after fail() means the bad "shv:" URI scheme was
// rejected as desired; the follow-up assertNull verifies no DN was created.
// The second half checks that file:// URIs and plain paths are all accepted.
/** * Test that a data-node does not start if configuration specifies * incorrect URI scheme in data directory. * Test that a data-node starts if data directory is specified as * URI = "file:///path" or as a non URI path. */ @Test public void testDataDirectories() throws IOException { File dataDir=new File(BASE_DIR,"data").getCanonicalFile(); Configuration conf=cluster.getConfiguration(0); String dnDir=makeURI("shv",null,fileAsURI(dataDir).getPath()); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir); DataNode dn=null; try { dn=DataNode.createDataNode(new String[]{},conf); fail(); } catch ( Exception e) { } finally { if (dn != null) { dn.shutdown(); } } assertNull("Data-node startup should have failed.",dn); String dnDir1=fileAsURI(dataDir).toString() + "1"; String dnDir2=makeURI("file","localhost",fileAsURI(dataDir).getPath() + "2"); String dnDir3=dataDir.getAbsolutePath() + "3"; conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir1 + "," + dnDir2+ ","+ dnDir3); try { cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null); assertTrue("Data-node should startup.",cluster.isDataNodeUp()); } finally { if (cluster != null) { cluster.shutdownDataNodes(); } } }

Class: org.apache.hadoop.hdfs.TestDatanodeRegistration

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): polls up to 10 times (with growing sleeps) for a post-restart
// heartbeat before comparing the datanode report's IPC port with the DN's real
// port. The swallowed InterruptedException inside the poll loop is deliberate
// best-effort waiting, though re-interrupting the thread would be cleaner.
/** * Regression test for HDFS-894 ensures that, when datanodes * are restarted, the new IPC port is registered with the * namenode. */ @Test public void testChangeIpcPort() throws Exception { HdfsConfiguration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).build(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); cluster.restartDataNodes(); DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL); long firstUpdateAfterRestart=report[0].getLastUpdate(); boolean gotHeartbeat=false; for (int i=0; i < 10 && !gotHeartbeat; i++) { try { Thread.sleep(i * 1000); } catch ( InterruptedException ie) { } report=client.datanodeReport(DatanodeReportType.ALL); gotHeartbeat=(report[0].getLastUpdate() > firstUpdateAfterRestart); } if (!gotHeartbeat) { fail("Never got a heartbeat from restarted datanode."); } int realIpcPort=cluster.getDataNodes().get(0).getIpcPort(); assertEquals(realIpcPort,report[0].getIpcPort()); } finally { if (cluster != null) { cluster.shutdown(); } } }

BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// NOTE(review): installs a DNS-counting SecurityManager and asserts that after
// cluster startup no further lookups happen during refreshNodes() or any
// getDatanodeListForReport() variant. The finally block restores the default
// SecurityManager — keep that, or later tests inherit the monitor.
/** * Ensure the datanode manager does not do host lookup after registration, * especially for node reports. * @throws Exception */ @Test public void testDNSLookups() throws Exception { MonitorDNS sm=new MonitorDNS(); System.setSecurityManager(sm); MiniDFSCluster cluster=null; try { HdfsConfiguration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).build(); cluster.waitActive(); int initialLookups=sm.lookups; assertTrue("dns security manager is active",initialLookups != 0); DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager(); dm.refreshNodes(conf); assertEquals(initialLookups,sm.lookups); dm.refreshNodes(conf); assertEquals(initialLookups,sm.lookups); dm.getDatanodeListForReport(DatanodeReportType.ALL); assertEquals(initialLookups,sm.lookups); dm.getDatanodeListForReport(DatanodeReportType.LIVE); assertEquals(initialLookups,sm.lookups); dm.getDatanodeListForReport(DatanodeReportType.DEAD); assertEquals(initialLookups,sm.lookups); } finally { if (cluster != null) { cluster.shutdown(); } System.setSecurityManager(null); } }

Class: org.apache.hadoop.hdfs.TestDecommission

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): replication factor (4) equals the datanode count, so replicas
// on the decommissioned node cannot all be migrated elsewhere — the test
// verifies the node still reaches DECOMMISSIONED instead of getting stuck.
// The trailing startCluster/shutdown pair re-verifies a clean restart.
/** * Tests decommission with replicas on the target datanode cannot be migrated * to other datanodes and satisfy the replication factor. Make sure the * datanode won't get stuck in decommissioning state. */ @Test(timeout=360000) public void testDecommission2() throws IOException { LOG.info("Starting test testDecommission"); int numNamenodes=1; int numDatanodes=4; conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3); startCluster(numNamenodes,numDatanodes,conf); ArrayList> namenodeDecomList=new ArrayList>(numNamenodes); namenodeDecomList.add(0,new ArrayList(numDatanodes)); Path file1=new Path("testDecommission2.dat"); int replicas=4; ArrayList decommissionedNodes=namenodeDecomList.get(0); FileSystem fileSys=cluster.getFileSystem(0); FSNamesystem ns=cluster.getNamesystem(0); writeFile(fileSys,file1,replicas); int deadDecomissioned=ns.getNumDecomDeadDataNodes(); int liveDecomissioned=ns.getNumDecomLiveDataNodes(); DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED); decommissionedNodes.add(decomNode); assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes()); assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes()); DFSClient client=getDfsClient(cluster.getNameNode(0),conf); assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length); assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes)); cleanupFile(fileSys,file1); cluster.shutdown(); startCluster(1,4,conf); cluster.shutdown(); }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): two unbounded while(true) poll loops, each bounded only by the
// 360s @Test timeout — first waits for the node to be reported DEAD (hosts file
// lists a non-existent DN), then, after rewriting the hosts file with the
// registration name and restarting the DN, waits for it to come back LIVE
// undecommissioned with getHostName() equal to the registration name.
/** * Test using a "registration name" in a host include file. * Registration names are DataNode names specified in the configuration by * dfs.datanode.hostname. The DataNode will send this name to the NameNode * as part of its registration. Registration names are helpful when you * want to override the normal first result of DNS resolution on the * NameNode. For example, a given datanode IP may map to two hostnames, * and you may want to choose which hostname is used internally in the * cluster. * It is not recommended to use a registration name which is not also a * valid DNS hostname for the DataNode. See HDFS-5237 for background. */ @Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException { Configuration hdfsConf=new Configuration(conf); final String registrationName="127.0.0.100"; final String nonExistentDn="127.0.0.10"; hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,registrationName); cluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build(); cluster.waitActive(); ArrayList nodes=new ArrayList(); nodes.add(nonExistentDn); writeConfigFile(hostsFile,nodes); refreshNodes(cluster.getNamesystem(0),hdfsConf); DFSClient client=getDfsClient(cluster.getNameNode(0),hdfsConf); while (true) { DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.DEAD); if (info.length == 1) { break; } LOG.info("Waiting for datanode to be marked dead"); Thread.sleep(HEARTBEAT_INTERVAL * 1000); } int dnPort=cluster.getDataNodes().get(0).getXferPort(); nodes=new ArrayList(); nodes.add(registrationName + ":" + dnPort); writeConfigFile(hostsFile,nodes); refreshNodes(cluster.getNamesystem(0),hdfsConf); cluster.restartDataNode(0); while (true) { DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.LIVE); if (info.length == 1) { Assert.assertFalse(info[0].isDecommissioned()); Assert.assertFalse(info[0].isDecommissionInProgress()); 
assertEquals(registrationName,info[0].getHostName()); break; } LOG.info("Waiting for datanode to come back"); Thread.sleep(HEARTBEAT_INTERVAL * 1000); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the excluded DN is added to the exclude file BEFORE the NN
// restart, so the restarted NN must pick the exclusion up on startup. The
// bounded retry loop (20 x 1s) waits for the block to be re-replicated off the
// decommissioned node; checkFile()==null signals success. The InterruptedException
// catch only guards the sleep and intentionally retries.
/** * Tests restart of namenode while datanode hosts are added to exclude file */ @Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException { LOG.info("Starting test testDecommissionWithNamenodeRestart"); int numNamenodes=1; int numDatanodes=1; int replicas=1; startCluster(numNamenodes,numDatanodes,conf); Path file1=new Path("testDecommission.dat"); FileSystem fileSys=cluster.getFileSystem(); writeFile(fileSys,file1,replicas); DFSClient client=getDfsClient(cluster.getNameNode(),conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); DatanodeID excludedDatanodeID=info[0]; String excludedDatanodeName=info[0].getXferAddr(); writeConfigFile(excludeFile,new ArrayList(Arrays.asList(excludedDatanodeName))); cluster.startDataNodes(conf,1,true,null,null,null,null); numDatanodes+=1; assertEquals("Number of datanodes should be 2 ",2,cluster.getDataNodes().size()); cluster.restartNameNode(); DatanodeInfo datanodeInfo=NameNodeAdapter.getDatanode(cluster.getNamesystem(),excludedDatanodeID); waitNodeState(datanodeInfo,AdminStates.DECOMMISSIONED); assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length); int tries=0; while (tries++ < 20) { try { Thread.sleep(1000); if (checkFile(fileSys,file1,replicas,datanodeInfo.getXferAddr(),numDatanodes) == null) { break; } } catch ( InterruptedException ie) { } } assertTrue("Checked if block was replicated after decommission, tried " + tries + " times.",tries < 20); cleanupFile(fileSys,file1); cluster.shutdown(); startCluster(numNamenodes,numDatanodes,conf); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestDistributedFileSystem

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): seeded-random checksum test covering hdfs:// and webhdfs://
// side by side. Per iteration n: writes foo<n>, checks getFileChecksum agrees
// across both filesystems (hashCode and equals), pins the exact MD5-of-CRC32
// string for a zero-byte file, verifies identical content (bar<n>) yields an
// identical checksum, and confirms webhdfs gets an IOException when the dir
// perms are 000 (then restores 777). Also pins the FileNotFoundException
// messages for a missing file and for a directory. Order-dependent throughout.
@Test public void testFileChecksum() throws Exception { final long seed=RAN.nextLong(); System.out.println("seed=" + seed); RAN.setSeed(seed); final Configuration conf=getTestConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final FileSystem hdfs=cluster.getFileSystem(); final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final UserGroupInformation current=UserGroupInformation.getCurrentUser(); final UserGroupInformation ugi=UserGroupInformation.createUserForTesting(current.getShortUserName() + "x",new String[]{"user"}); try { hdfs.getFileChecksum(new Path("/test/TestNonExistingFile")); fail("Expecting FileNotFoundException"); } catch ( FileNotFoundException e) { assertTrue("Not throwing the intended exception message",e.getMessage().contains("File does not exist: /test/TestNonExistingFile")); } try { Path path=new Path("/test/TestExistingDir/"); hdfs.mkdirs(path); hdfs.getFileChecksum(path); fail("Expecting FileNotFoundException"); } catch ( FileNotFoundException e) { assertTrue("Not throwing the intended exception message",e.getMessage().contains("Path is not a file: /test/TestExistingDir")); } final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){ @Override public FileSystem run() throws Exception { return new Path(webhdfsuri).getFileSystem(conf); } } ); final Path dir=new Path("/filechecksum"); final int block_size=1024; final int buffer_size=conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,512); for (int n=0; n < 5; n++) { final byte[] data=new byte[RAN.nextInt(block_size / 2 - 1) + n * block_size + 1]; RAN.nextBytes(data); System.out.println("data.length=" + data.length); final Path foo=new Path(dir,"foo" + n); { final 
FSDataOutputStream out=hdfs.create(foo,false,buffer_size,(short)2,block_size); out.write(data); out.close(); } final FileChecksum hdfsfoocs=hdfs.getFileChecksum(foo); System.out.println("hdfsfoocs=" + hdfsfoocs); final FileChecksum webhdfsfoocs=webhdfs.getFileChecksum(foo); System.out.println("webhdfsfoocs=" + webhdfsfoocs); final Path webhdfsqualified=new Path(webhdfsuri + dir,"foo" + n); final FileChecksum webhdfs_qfoocs=webhdfs.getFileChecksum(webhdfsqualified); System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs); final Path zeroByteFile=new Path(dir,"zeroByteFile" + n); { final FSDataOutputStream out=hdfs.create(zeroByteFile,false,buffer_size,(short)2,block_size); out.close(); } { final FileChecksum zeroChecksum=hdfs.getFileChecksum(zeroByteFile); assertEquals(zeroChecksum.toString(),"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51"); } final Path bar=new Path(dir,"bar" + n); { final FSDataOutputStream out=hdfs.create(bar,false,buffer_size,(short)2,block_size); out.write(data); out.close(); } { final FileChecksum barcs=hdfs.getFileChecksum(bar); final int barhashcode=barcs.hashCode(); assertEquals(hdfsfoocs.hashCode(),barhashcode); assertEquals(hdfsfoocs,barcs); assertEquals(webhdfsfoocs.hashCode(),barhashcode); assertEquals(webhdfsfoocs,barcs); assertEquals(webhdfs_qfoocs.hashCode(),barhashcode); assertEquals(webhdfs_qfoocs,barcs); } hdfs.setPermission(dir,new FsPermission((short)0)); { try { webhdfs.getFileChecksum(webhdfsqualified); fail(); } catch ( IOException ioe) { FileSystem.LOG.info("GOOD: getting an exception",ioe); } } hdfs.setPermission(dir,new FsPermission((short)0777)); } cluster.shutdown(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): writes the same 1024 bytes to two files created with different
// ChecksumOpt types (CRC32C vs CRC32) and asserts the resulting
// MD5MD5CRC32FileChecksum objects differ and report the requested CRC types —
// i.e. the per-create checksum option is honored over the cluster default.
@Test public void testCreateWithCustomChecksum() throws Exception { Configuration conf=getTestConfiguration(); MiniDFSCluster cluster=null; Path testBasePath=new Path("/test/csum"); Path path1=new Path(testBasePath,"file_wtih_crc1"); Path path2=new Path(testBasePath,"file_with_crc2"); ChecksumOpt opt1=new ChecksumOpt(DataChecksum.Type.CRC32C,512); ChecksumOpt opt2=new ChecksumOpt(DataChecksum.Type.CRC32,512); FsPermission perm=FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)); EnumSet flags=EnumSet.of(CreateFlag.OVERWRITE,CreateFlag.CREATE); short repl=1; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); FileSystem dfs=cluster.getFileSystem(); dfs.mkdirs(testBasePath); FSDataOutputStream out1=dfs.create(path1,perm,flags,4096,repl,131072L,null,opt1); FSDataOutputStream out2=dfs.create(path2,perm,flags,4096,repl,131072L,null,opt2); for (int i=0; i < 1024; i++) { out1.write(i); out2.write(i); } out1.close(); out2.close(); MD5MD5CRC32FileChecksum sum1=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1); MD5MD5CRC32FileChecksum sum2=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2); assertFalse(sum1.equals(sum2)); assertEquals(DataChecksum.Type.CRC32C,sum1.getCrcType()); assertEquals(DataChecksum.Type.CRC32,sum2.getCrcType()); } finally { if (cluster != null) { cluster.getFileSystem().delete(testBasePath,true); cluster.shutdown(); } } }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): fault-injection test with three phases: (1) a mocked
// DataNodeFaultInjector sleeps 3s per getHdfsBlocksMetadata call, exceeding the
// 1.5s client timeout, so every location must report 0 cached hosts; (2) with
// one DN stopped, each block must show exactly one null VolumeId (the XOR
// assertion); (3) after restarting the DN and deleting tmpFile2 (waiting for NN
// deletions + DN heartbeats), the deleted file's VolumeIds are null while the
// surviving file's are non-null. The injector is restored before phase 2 —
// that reset is load-bearing.
/** * Tests error paths for{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)} */ @Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception { final Configuration conf=getTestConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true); conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster.getDataNodes(); final DistributedFileSystem fs=cluster.getFileSystem(); final Path tmpFile1=new Path("/errorfile1.dat"); final Path tmpFile2=new Path("/errorfile2.dat"); DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl); DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl); GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ try { List list=Lists.newArrayList(); list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024))); list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024))); int totalRepl=0; for ( BlockLocation loc : list) { totalRepl+=loc.getHosts().length; } if (totalRepl == 4) { return true; } } catch ( IOException e) { } return false; } } ,500,30000); BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024); BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024); List allLocs=Lists.newArrayList(); allLocs.addAll(Arrays.asList(blockLocs1)); allLocs.addAll(Arrays.asList(blockLocs2)); DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { Thread.sleep(3000); return null; } } ).when(injector).getHdfsBlocksMetadata(); DataNodeFaultInjector.instance=injector; BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs); for ( BlockStorageLocation loc : locs) { 
assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length); } DataNodeFaultInjector.instance=new DataNodeFaultInjector(); DataNodeProperties stoppedNode=cluster.stopDataNode(0); locs=fs.getFileBlockStorageLocations(allLocs); assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length); for ( BlockStorageLocation l : locs) { assertEquals("Expected two replicas for each block",2,l.getHosts().length); assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length); assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null)); } cluster.restartDataNode(stoppedNode,true); cluster.waitActive(); fs.delete(tmpFile2,true); HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode()); cluster.triggerHeartbeats(); HATestUtil.waitForDNDeletions(cluster); locs=fs.getFileBlockStorageLocations(allLocs); assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length); assertNotNull(locs[0].getVolumeIds()[0]); assertNotNull(locs[0].getVolumeIds()[1]); assertNull(locs[1].getVolumeIds()[0]); assertNull(locs[1].getVolumeIds()[1]); } finally { if (cluster != null) { cluster.shutdown(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests DFSClient lease-renewer lifecycle: the renewer thread must run only
 * while files are open for write, must stop one grace period after the last
 * close, and must never start for read-only access. Also checks opening a
 * missing path and access via an explicit hdfs:// IP-address URI.
 */
@Test
public void testDFSClient() throws Exception {
  Configuration conf = getTestConfiguration();
  final long grace = 1000L;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final String filepathstring = "/test/LeaseChecker/foo";
    final Path[] filepaths = new Path[4];
    for (int i = 0; i < filepaths.length; i++) {
      filepaths[i] = new Path(filepathstring + i);
    }
    final long millis = Time.now();
    {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      {
        // One open file: renewer runs while open, stops after the grace period.
        final FSDataOutputStream out = dfs.create(filepaths[0]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.close();
        Thread.sleep(grace / 4 * 3);
        // Still inside the grace period: renewer has not stopped yet.
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        for (int i = 0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }
      {
        // Two files open concurrently keep the renewer alive throughout.
        final FSDataOutputStream out1 = dfs.create(filepaths[1]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        final FSDataOutputStream out2 = dfs.create(filepaths[2]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.close();
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
      }
      {
        // A third file opened while the renewer is in its grace period.
        final FSDataOutputStream out3 = dfs.create(filepaths[3]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        for (int i = 0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }
      dfs.close();
    }
    {
      // Opening a missing path must throw FileNotFoundException.
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/wrwelkj");
      assertFalse("File should not exist for test.", fs.exists(dir));
      try {
        FSDataInputStream in = fs.open(dir);
        try {
          in.close();
          fs.close();
        } finally {
          // fail() instead of the old assertTrue(msg, false): same
          // AssertionError, clearer intent.
          fail("Did not get a FileNotFoundException for non-existing" + " file.");
        }
      } catch (FileNotFoundException fnf) {
        // expected
      }
    }
    {
      // Read-only access must not start the lease renewer.
      final DistributedFileSystem dfs = cluster.getFileSystem();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      FSDataInputStream in = dfs.open(filepaths[0]);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      assertEquals(millis, in.readLong());
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      in.close();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      dfs.close();
    }
    {
      // Round-trip a buffer through an explicit hdfs:// IP-address URI.
      String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file";
      Path path = new Path(uri);
      FileSystem fs = FileSystem.get(path.toUri(), conf);
      FSDataOutputStream out = fs.create(path);
      byte[] buf = new byte[1024];
      out.write(buf);
      out.close();
      FSDataInputStream in = fs.open(path);
      in.readFully(buf);
      in.close();
      fs.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single
 * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
 * call: every replica of both files should report a valid volume id.
 */
@Test(timeout=60000)
public void testGetFileBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = getTestConfiguration();
  // Verbose tracing for the RPC layers exercised here.
  ((Log4JLogger) ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.TRACE);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    // Two 1-block files, each replicated on both datanodes.
    final Path tmpFile1 = new Path("/tmpfile1.dat");
    final Path tmpFile2 = new Path("/tmpfile2.dat");
    // Upper-case 'L' suffix: lowercase 'l' is easily misread as the digit 1.
    DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
    DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
    // Wait until all 4 replicas (2 files x replication 2) are reported.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          List<BlockLocation> list = Lists.newArrayList();
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1, 0, 1024)));
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2, 0, 1024)));
          int totalRepl = 0;
          for (BlockLocation loc : list) {
            totalRepl += loc.getHosts().length;
          }
          if (totalRepl == 4) {
            return true;
          }
        } catch (IOException e) {
          // ignored: keep polling until waitFor's own timeout expires
        }
        return false;
      }
    }, 500, 30000);
    // Batch both files' locations into a single storage-locations call.
    BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
    BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
    BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1, blockLocs2);
    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
    int counter = 0;
    for (BlockStorageLocation l : locs) {
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        if (id != null) {
          System.out.println("Datanode " + name + " has block " + counter
              + " on volume id " + id.toString());
        }
      }
      counter++;
    }
    assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2, locs.length);
    for (BlockStorageLocation l : locs) {
      assertEquals("Expected two replicas for each block", 2, l.getVolumeIds().length);
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        assertTrue("Expected block to be valid on datanode " + name, id != null);
      }
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestEncryptedTransfer

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testLongLivedReadClientAfterRestart() throws IOException { MiniDFSCluster cluster=null; try { Configuration conf=new Configuration(); cluster=new MiniDFSCluster.Builder(conf).build(); FileSystem fs=getFileSystem(conf); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); FileChecksum checksum=fs.getFileChecksum(TEST_PATH); fs.close(); cluster.shutdown(); setEncryptionConfigKeys(conf); cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build(); fs=getFileSystem(conf); assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); assertEquals(checksum,fs.getFileChecksum(TEST_PATH)); cluster.restartNameNode(); assertTrue(cluster.restartDataNode(0)); assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); assertEquals(checksum,fs.getFileChecksum(TEST_PATH)); fs.close(); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testLongLivedWriteClientAfterRestart() throws IOException { MiniDFSCluster cluster=null; try { Configuration conf=new Configuration(); setEncryptionConfigKeys(conf); cluster=new MiniDFSCluster.Builder(conf).build(); FileSystem fs=getFileSystem(conf); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); cluster.restartNameNode(); assertTrue(cluster.restartDataNodes()); cluster.waitActive(); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); fs.close(); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testClientThatDoesNotSupportEncryption() throws IOException { MiniDFSCluster cluster=null; try { Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10); cluster=new MiniDFSCluster.Builder(conf).build(); FileSystem fs=getFileSystem(conf); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); fs.close(); cluster.shutdown(); setEncryptionConfigKeys(conf); cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build(); fs=getFileSystem(conf); DFSClient client=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs); DFSClient spyClient=Mockito.spy(client); Mockito.doReturn(false).when(spyClient).shouldEncryptData(); DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyClient); LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataNode.class)); try { assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH)); if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) { fail("Should not have been able to read without encryption enabled."); } } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Could not obtain block:",ioe); } finally { logs.stopCapturing(); } fs.close(); if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) { GenericTestUtils.assertMatches(logs.getOutput(),"Failed to read expected encryption handshake from client at"); } } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestEncryptionZones

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test getEncryptionZoneForPath as a non super user: the user may resolve
 * zones on paths it can access (including snapshotted and since-deleted
 * snapshot paths), gets AccessControlException on superuser-only paths,
 * null for non-EZ or deleted live paths, and NPE for a null argument.
 */
@Test(timeout=60000) public void testGetEZAsNonSuperUser() throws Exception {
  final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
  // Layout: /<root>/superuseronly (0700 EZ), /<root>/accessall (0777 EZ),
  // /<root>/nonEZDir (0777, not an EZ); each with one file.
  final Path testRoot=new Path(fsHelper.getTestRootDir());
  final Path superPath=new Path(testRoot,"superuseronly");
  final Path superPathFile=new Path(superPath,"file1");
  final Path allPath=new Path(testRoot,"accessall");
  final Path allPathFile=new Path(allPath,"file1");
  final Path nonEZDir=new Path(testRoot,"nonEZDir");
  final Path nonEZFile=new Path(nonEZDir,"file1");
  final int len=8192;
  fsWrapper.mkdir(testRoot,new FsPermission((short)0777),true);
  fsWrapper.mkdir(superPath,new FsPermission((short)0700),false);
  fsWrapper.mkdir(allPath,new FsPermission((short)0777),false);
  fsWrapper.mkdir(nonEZDir,new FsPermission((short)0777),false);
  dfsAdmin.createEncryptionZone(superPath,TEST_KEY);
  dfsAdmin.createEncryptionZone(allPath,TEST_KEY);
  // Snapshot the root BEFORE creating files, so the snapshot pins the zones.
  dfsAdmin.allowSnapshot(new Path("/"));
  final Path newSnap=fs.createSnapshot(new Path("/"));
  DFSTestUtil.createFile(fs,superPathFile,len,(short)1,0xFEED);
  DFSTestUtil.createFile(fs,allPathFile,len,(short)1,0xFEED);
  DFSTestUtil.createFile(fs,nonEZFile,len,(short)1,0xFEED);
  // All assertions below run as the unprivileged user.
  user.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws Exception {
      final HdfsAdmin userAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
      // Null argument is rejected with an NPE.
      try {
        userAdmin.getEncryptionZoneForPath(null);
        fail("should have thrown NPE");
      } catch ( NullPointerException e) {
        // expected
      }
      // Accessible zone resolves for both the zone root and a file in it.
      assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
      assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
      // Superuser-only zone is denied to this user.
      try {
        userAdmin.getEncryptionZoneForPath(superPathFile);
        fail("expected AccessControlException");
      } catch ( AccessControlException e) {
        assertExceptionContains("Permission denied:",e);
      }
      // Paths outside any zone resolve to null.
      assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZDir));
      assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZFile));
      // The snapshotted zone path keeps resolving even as the live file and
      // then the live zone directory are deleted.
      String snapshottedAllPath=newSnap.toString() + allPath.toString();
      assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
      fs.delete(allPathFile,false);
      assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
      fs.delete(allPath,true);
      assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
      // The deleted live paths no longer resolve to a zone.
      assertNull("expected null for deleted file path",userAdmin.getEncryptionZoneForPath(allPathFile));
      assertNull("expected null for deleted directory path",userAdmin.getEncryptionZoneForPath(allPath));
      return null;
    }
  });
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=120000) public void testReadWrite() throws Exception { final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf); final Path baseFile=new Path("/base"); final int len=8192; DFSTestUtil.createFile(fs,baseFile,len,(short)1,0xFEED); final Path zone=new Path("/zone"); fs.mkdirs(zone); dfsAdmin.createEncryptionZone(zone,TEST_KEY); final Path encFile1=new Path(zone,"myfile"); DFSTestUtil.createFile(fs,encFile1,len,(short)1,0xFEED); verifyFilesEqual(fs,baseFile,encFile1,len); assertNumZones(1); String keyName=dfsAdmin.listEncryptionZones().next().getKeyName(); cluster.getNamesystem().getProvider().rollNewVersion(keyName); verifyFilesEqual(fs,baseFile,encFile1,len); final Path encFile2=new Path(zone,"myfile2"); DFSTestUtil.createFile(fs,encFile2,len,(short)1,0xFEED); FileEncryptionInfo feInfo1=getFileEncryptionInfo(encFile1); FileEncryptionInfo feInfo2=getFileEncryptionInfo(encFile2); assertFalse("EDEKs should be different",Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),feInfo2.getEncryptedDataEncryptionKey())); assertNotEquals("Key was rolled, versions should be different",feInfo1.getEzKeyVersionName(),feInfo2.getEzKeyVersionName()); verifyFilesEqual(fs,encFile1,encFile2,len); }

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests cipher suite negotiation when creating files in an encryption zone:
 * a client advertising no suite or only unknown suites must be rejected,
 * while any list containing AES_CTR_NOPADDING succeeds regardless of
 * position; also checks that only one key (and key version) was created.
 */
@Test(timeout=60000) public void testCipherSuiteNegotiation() throws Exception {
  final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
  final Path zone=new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone,TEST_KEY);
  // Default client suites: creation succeeds.
  DFSTestUtil.createFile(fs,new Path(zone,"success1"),0,(short)1,0xFEED);
  // Empty suite list: creation must fail.
  fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(0);
  try {
    DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch ( UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites",e);
  }
  // Only UNKNOWN suites: creation must also fail.
  fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  try {
    DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch ( UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites",e);
  }
  // Known suite first in the list: succeeds.
  fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  DFSTestUtil.createFile(fs,new Path(zone,"success2"),0,(short)1,0xFEED);
  // Known suite last in the list: still succeeds.
  fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  DFSTestUtil.createFile(fs,new Path(zone,"success3"),4096,(short)1,0xFEED);
  // Exactly one key with one version should exist for the single zone.
  cluster.getNamesystem().getProvider().flush();
  KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
  List keys=provider.getKeys();
  assertEquals("Expected NN to have created one key per zone",1,keys.size());
  List allVersions=Lists.newArrayList();
  for ( String key : keys) {
    List versions=provider.getKeyVersions(key);
    assertEquals("Should only have one key version per key",1,versions.size());
    allVersions.addAll(versions);
  }
  // Both successfully negotiated files must record AES_CTR_NOPADDING.
  for (int i=2; i <= 3; i++) {
    FileEncryptionInfo feInfo=getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
    assertEquals(feInfo.getCipherSuite(),CipherSuite.AES_CTR_NOPADDING);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the retry logic in startFile. We release the lock while generating
 * an EDEK, so tricky things can happen in the intervening time: a zone is
 * created, deleted, or replaced with a zone using a different key while the
 * create is in flight. Each scenario checks the observed retry count, and
 * the final loop forces enough retries to exhaust the limit.
 */
@Test(timeout=120000) public void testStartFileRetry() throws Exception {
  final Path zone1=new Path("/zone1");
  final Path file=new Path(zone1,"file1");
  fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
  ExecutorService executor=Executors.newSingleThreadExecutor();
  // Scenario 1: zone created during the EDEK window -> one retry expected.
  executor.submit(new InjectFaultTask(){
    @Override public void doFault() throws Exception {
      dfsAdmin.createEncryptionZone(zone1,TEST_KEY);
    }
    @Override public void doCleanup() throws Exception {
      assertEquals("Expected a startFile retry",2,injector.generateCount);
      fsWrapper.delete(file,false);
    }
  }).get();
  // Scenario 2: zone deleted during the window -> no retry expected.
  executor.submit(new InjectFaultTask(){
    @Override public void doFault() throws Exception {
      fsWrapper.delete(zone1,true);
    }
    @Override public void doCleanup() throws Exception {
      assertEquals("Expected no startFile retries",1,injector.generateCount);
      fsWrapper.delete(file,false);
    }
  }).get();
  // Scenario 3: zone replaced with one using a different key -> one retry.
  fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
  final String otherKey="otherKey";
  DFSTestUtil.createKey(otherKey,cluster,conf);
  dfsAdmin.createEncryptionZone(zone1,TEST_KEY);
  executor.submit(new InjectFaultTask(){
    @Override public void doFault() throws Exception {
      fsWrapper.delete(zone1,true);
      fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
      dfsAdmin.createEncryptionZone(zone1,otherKey);
    }
    @Override public void doCleanup() throws Exception {
      assertEquals("Expected a startFile retry",2,injector.generateCount);
      fsWrapper.delete(zone1,true);
    }
  }).get();
  // Scenario 4: flip the zone's key on every attempt so each retry sees a
  // different zone, eventually exceeding the retry limit.
  fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
  final String anotherKey="anotherKey";
  DFSTestUtil.createKey(anotherKey,cluster,conf);
  dfsAdmin.createEncryptionZone(zone1,anotherKey);
  String keyToUse=otherKey;
  MyInjector injector=new MyInjector();
  EncryptionFaultInjector.instance=injector;
  Future future=executor.submit(new CreateFileTask(fsWrapper,file));
  for (int i=0; i < 10; i++) {
    // Wait until the creator is inside the EDEK window, then swap the zone.
    injector.ready.await();
    fsWrapper.delete(zone1,true);
    fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
    dfsAdmin.createEncryptionZone(zone1,keyToUse);
    // Reference (==) comparison is deliberate here: keyToUse only ever
    // aliases the otherKey/anotherKey constants assigned above.
    if (keyToUse == otherKey) {
      keyToUse=anotherKey;
    } else {
      keyToUse=otherKey;
    }
    // Release the creator, then arm a fresh injector for the next attempt.
    injector.wait.countDown();
    injector=new MyInjector();
    EncryptionFaultInjector.instance=injector;
  }
  try {
    future.get();
    fail("Expected exception from too many retries");
  } catch ( ExecutionException e) {
    assertExceptionContains("Too many retries because of encryption zone operations",e.getCause());
  }
}

Class: org.apache.hadoop.hdfs.TestFileAppend

UtilityVerifier EqualityVerifier HybridVerifier 
/** * Test two consecutive appends on a file with a full block. */ @Test public void testAppendTwice() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); final FileSystem fs1=cluster.getFileSystem(); final FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(conf); try { final Path p=new Path("/testAppendTwice/foo"); final int len=1 << 16; final byte[] fileContents=AppendTestUtil.initBuffer(len); { FSDataOutputStream out=fs2.create(p,true,4096,(short)1,len); out.write(fileContents,0,len); out.close(); } fs2.append(p); fs1.append(p); Assert.fail(); } catch ( RemoteException re) { AppendTestUtil.LOG.info("Got an exception:",re); Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),re.getClassName()); } finally { fs2.close(); fs1.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestFileAppend2

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Creates one file, writes a few bytes to it and then closes it.
 * Reopens the same file for appending, writes all blocks and then closes.
 * Verifies that all data exists in the file. Also checks that appending to
 * a non-existent file fails, and that append honors file/dir permissions.
 * @throws IOException an exception might be thrown
 */
@Test
public void testSimpleAppend() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    { // Append in three stages, then verify the full contents.
      Path file1 = new Path("/simpleAppend.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleAppend.dat");
      int mid = 186;
      System.out.println("Writing " + mid + " bytes to file " + file1);
      stm.write(fileContents, 0, mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      int mid2 = 607;
      // Fixed: previously printed "mid" although mid2 - mid bytes are written.
      System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
      stm = fs.append(file1);
      stm.write(fileContents, mid, mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      stm = fs.append(file1);
      // An append stream on a non-empty file starts past position 0.
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
      stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
      System.out.println("Written second part of file");
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    }
    { // Appending to a non-existent file must throw FileNotFoundException.
      FSDataOutputStream out = null;
      try {
        out = fs.append(new Path("/non-existing.dat"));
        fail("Expected to have FileNotFoundException");
      } catch (java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
    { // Permission checks for append as a non-superuser.
      Path root = new Path("/");
      fs.setPermission(root, new FsPermission((short) 0777));
      fs.close();
      final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
      String username = "testappenduser";
      String group = "testappendgroup";
      // The test user must genuinely differ from the current superuser.
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser =
          UserGroupInformation.createUserForTesting(username, new String[] { group });
      fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
      Path dir = new Path(root, getClass().getSimpleName());
      Path foo = new Path(dir, "foo.dat");
      FSDataOutputStream out = null;
      int offset = 0;
      // Create the file as the test user.
      try {
        out = fs.create(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // Owner write permission on the file is enough to append.
      fs.setPermission(dir, new FsPermission((short) 0100));
      fs.setPermission(foo, new FsPermission((short) 0200));
      out = null;
      try {
        out = fs.append(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // Without write permission on the file, append must be denied.
      fs.setPermission(foo, new FsPermission((short) 0577));
      fs.setPermission(dir, new FsPermission((short) 0777));
      out = null;
      try {
        out = fs.append(foo);
        fail("Expected to have AccessControlException");
      } catch (AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  } catch (IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestFileAppend3

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * TC11: Racing rename * @throws IOException an exception might be thrown */ @Test public void testTC11() throws Exception { final Path p=new Path("/TC11/foo"); System.out.println("p=" + p); final int len1=(int)BLOCK_SIZE; { FSDataOutputStream out=fs.create(p,false,buffersize,REPLICATION,BLOCK_SIZE); AppendTestUtil.write(out,0,len1); out.close(); } FSDataOutputStream out=fs.append(p); final int len2=(int)BLOCK_SIZE / 2; AppendTestUtil.write(out,len1,len2); out.hflush(); final Path pnew=new Path(p + ".new"); assertTrue(fs.rename(p,pnew)); out.close(); final long len=fs.getFileStatus(pnew).getLen(); final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(pnew.toString(),0L,len); final int numblock=locatedblocks.locatedBlockCount(); for (int i=0; i < numblock; i++) { final LocatedBlock lb=locatedblocks.get(i); final ExtendedBlock blk=lb.getBlock(); final long size=lb.getBlockSize(); if (i < numblock - 1) { assertEquals(BLOCK_SIZE,size); } for ( DatanodeInfo datanodeinfo : lb.getLocations()) { final DataNode dn=cluster.getDataNode(datanodeinfo.getIpcPort()); final Block metainfo=DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId()); assertEquals(size,metainfo.getNumBytes()); } } }

Class: org.apache.hadoop.hdfs.TestFileAppend4

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** * Test that an append with no locations fails with an exception * showing insufficient locations. */ @Test(timeout=60000) public void testAppendInsufficientLocations() throws Exception { Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,3000); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); DistributedFileSystem fileSystem=null; try { fileSystem=cluster.getFileSystem(); Path f=new Path("/testAppend"); FSDataOutputStream create=fileSystem.create(f,(short)2); create.write("/testAppend".getBytes()); create.close(); DFSTestUtil.waitReplication(fileSystem,f,(short)2); LocatedBlocks lbs=fileSystem.dfs.getNamenode().getBlockLocations("/testAppend",0,Long.MAX_VALUE); List dnsOfCluster=cluster.getDataNodes(); DatanodeInfo[] dnsWithLocations=lbs.getLastLocatedBlock().getLocations(); for ( DataNode dn : dnsOfCluster) { for ( DatanodeInfo loc : dnsWithLocations) { if (dn.getDatanodeId().equals(loc)) { dn.shutdown(); DFSTestUtil.waitForDatanodeDeath(dn); } } } DFSTestUtil.waitReplication(fileSystem,f,(short)0); try { fileSystem.append(f); fail("Append should fail because insufficient locations"); } catch ( IOException e) { LOG.info("Expected exception: ",e); } FSDirectory dir=cluster.getNamesystem().getFSDirectory(); final INodeFile inode=INodeFile.valueOf(dir.getINode("/testAppend"),"/testAppend"); assertTrue("File should remain closed",!inode.isUnderConstruction()); } finally { if (null != fileSystem) { fileSystem.close(); } cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * Test case that stops a writer after finalizing a block but * before calling completeFile, and then tries to recover * the lease from another thread. */ @Test(timeout=60000) public void testRecoverFinalizedBlock() throws Throwable { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build(); try { cluster.waitActive(); NamenodeProtocols preSpyNN=cluster.getNameNodeRpc(); NamenodeProtocols spyNN=spy(preSpyNN); GenericTestUtils.DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG); doAnswer(delayer).when(spyNN).complete(anyString(),anyString(),(ExtendedBlock)anyObject(),anyLong()); DFSClient client=new DFSClient(null,spyNN,conf,null); file1=new Path("/testRecoverFinalized"); final OutputStream stm=client.create("/testRecoverFinalized",true); AppendTestUtil.write(stm,0,4096); final AtomicReference err=new AtomicReference(); Thread t=new Thread(){ @Override public void run(){ try { stm.close(); } catch ( Throwable t) { err.set(t); } } } ; t.start(); LOG.info("Waiting for close to get to latch..."); delayer.waitForCall(); LOG.info("Killing lease checker"); client.getLeaseRenewer().interruptAndJoin(); FileSystem fs1=cluster.getFileSystem(); FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf()); LOG.info("Recovering file"); recoverFile(fs2); LOG.info("Telling close to proceed."); delayer.proceed(); LOG.info("Waiting for close to finish."); t.join(); LOG.info("Close finished."); Throwable thrownByClose=err.get(); assertNotNull(thrownByClose); assertTrue(thrownByClose instanceof IOException); if (!thrownByClose.getMessage().contains("No lease on /testRecoverFinalized")) throw thrownByClose; } finally { cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * Test case that stops a writer after finalizing a block but * before calling completeFile, recovers a file from another writer, * starts writing from that writer, and then has the old lease holder * call completeFile */ @Test(timeout=60000) public void testCompleteOtherLeaseHoldersFile() throws Throwable { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build(); try { cluster.waitActive(); NamenodeProtocols preSpyNN=cluster.getNameNodeRpc(); NamenodeProtocols spyNN=spy(preSpyNN); GenericTestUtils.DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG); doAnswer(delayer).when(spyNN).complete(anyString(),anyString(),(ExtendedBlock)anyObject(),anyLong()); DFSClient client=new DFSClient(null,spyNN,conf,null); file1=new Path("/testCompleteOtherLease"); final OutputStream stm=client.create("/testCompleteOtherLease",true); AppendTestUtil.write(stm,0,4096); final AtomicReference err=new AtomicReference(); Thread t=new Thread(){ @Override public void run(){ try { stm.close(); } catch ( Throwable t) { err.set(t); } } } ; t.start(); LOG.info("Waiting for close to get to latch..."); delayer.waitForCall(); LOG.info("Killing lease checker"); client.getLeaseRenewer().interruptAndJoin(); FileSystem fs1=cluster.getFileSystem(); FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf()); LOG.info("Recovering file"); recoverFile(fs2); LOG.info("Opening file for append from new fs"); FSDataOutputStream appenderStream=fs2.append(file1); LOG.info("Writing some data from new appender"); AppendTestUtil.write(appenderStream,0,4096); LOG.info("Telling old close to proceed."); delayer.proceed(); LOG.info("Waiting for close to finish."); t.join(); LOG.info("Close finished."); Throwable thrownByClose=err.get(); assertNotNull(thrownByClose); assertTrue(thrownByClose instanceof IOException); if (!thrownByClose.getMessage().contains("Lease mismatch")) throw thrownByClose; appenderStream.close(); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestFileConcurrentReader

UtilityVerifier NullVerifier HybridVerifier 
/**
 * A writer thread continuously appends and hflush()es a file while an opener
 * thread repeatedly opens and closes the same file; the test fails if the
 * opener ever sees an exception before reaching its required open count.
 */
@Test(timeout=30000) public void testImmediateReadOfNewFile() throws IOException {
  final int blockSize=64 * 1024;
  // Each writer iteration pushes ten blocks worth of data.
  final int writeSize=10 * blockSize;
  Configuration conf=new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize);
  init(conf);
  // The opener must succeed this many times before the writer is told to stop.
  final int requiredSuccessfulOpens=100;
  final Path file=new Path("/file1");
  final AtomicBoolean openerDone=new AtomicBoolean(false);
  final AtomicReference errorMessage=new AtomicReference();
  final FSDataOutputStream out=fileSystem.create(file);
  // Writer: keep appending and hflush()ing until the opener signals
  // completion, then close the stream.
  final Thread writer=new Thread(new Runnable(){
    @Override public void run(){
      try {
        while (!openerDone.get()) {
          out.write(DFSTestUtil.generateSequentialBytes(0,writeSize));
          out.hflush();
        }
      }
 catch (      IOException e) {
        LOG.warn("error in writer",e);
      }
 finally {
        try {
          out.close();
        }
 catch (        IOException e) {
          LOG.error("unable to close file");
        }
      }
    }
  }
);
  // Opener: open/close the file while it is still being written; any
  // exception is recorded and fails the test at the end.
  Thread opener=new Thread(new Runnable(){
    @Override public void run(){
      try {
        for (int i=0; i < requiredSuccessfulOpens; i++) {
          fileSystem.open(file).close();
        }
        openerDone.set(true);
      }
 catch (      IOException e) {
        openerDone.set(true);
        errorMessage.set(String.format("got exception : %s",StringUtils.stringifyException(e)));
      }
 catch (      Exception e) {
        // Non-IO failure: also interrupt the writer immediately.
        openerDone.set(true);
        errorMessage.set(String.format("got exception : %s",StringUtils.stringifyException(e)));
        writer.interrupt();
        fail("here");
      }
    }
  }
);
  writer.start();
  opener.start();
  try {
    writer.join();
    opener.join();
  }
 catch (  InterruptedException e) {
    // Preserve interrupt status for the test runner.
    Thread.currentThread().interrupt();
  }
  // The recorded message (if any) doubles as the failure message.
  assertNull(errorMessage.get(),errorMessage.get());
}

Class: org.apache.hadoop.hdfs.TestFileCorruption

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
 * See Hadoop-4351.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new HdfsConfiguration();
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs=cluster.getFileSystem();
    // One tiny file replicated to both datanodes.
    final Path FILE_PATH=new Path("/tmp.txt");
    final long FILE_LEN=1L;
    DFSTestUtil.createFile(fs,FILE_PATH,FILE_LEN,(short)2,1L);
    // Locate the on-disk block replica; it may live in either of the two
    // storage directories of datanode 0.
    final String bpid=cluster.getNamesystem().getBlockPoolId();
    File storageDir=cluster.getInstanceStorageDir(0,0);
    File dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
    assertTrue("Data directory does not exist",dataDir.exists());
    ExtendedBlock blk=getBlock(bpid,dataDir);
    if (blk == null) {
      storageDir=cluster.getInstanceStorageDir(0,1);
      dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
      blk=getBlock(bpid,dataDir);
    }
    assertFalse("Data directory does not contain any blocks or there was an " + "IO error",blk == null);
    // Start a third datanode that holds no replica of the block.
    cluster.startDataNodes(conf,1,true,null,null);
    ArrayList datanodes=cluster.getDataNodes();
    assertEquals(datanodes.size(),3);
    DataNode dataNode=datanodes.get(2);
    DatanodeRegistration dnR=DataNodeTestUtils.getDNRegistrationForBP(dataNode,blk.getBlockPoolId());
    // Report the block corrupt from that replica-less datanode; the write
    // lock is required by findAndMarkBlockAsCorrupt.
    FSNamesystem ns=cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,new DatanodeInfo(dnR),"TEST","STORAGE_ID");
    }
  finally {
      ns.writeUnlock();
    }
    // The file must still be openable and deletable afterwards — i.e. the
    // bogus corruption report must not have corrupted NameNode state.
    fs.open(FILE_PATH);
    fs.delete(FILE_PATH,false);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestFileCreation

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test public void testLeaseExpireHardLimit() throws Exception {
  System.out.println("testLeaseExpireHardLimit start");
  final long leasePeriod=1000;
  final int DATANODE_NUM=3;
  Configuration conf=new HdfsConfiguration();
  // Fast heartbeats so the cluster reacts quickly to the shortened lease.
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  DistributedFileSystem dfs=null;
  try {
    cluster.waitActive();
    dfs=cluster.getFileSystem();
    final String f=DIR + "foo";
    final Path fpath=new Path(f);
    // Write and hflush, but deliberately leave the file unclosed so the
    // lease stays open.
    HdfsDataOutputStream out=create(dfs,fpath,DATANODE_NUM);
    out.write("something".getBytes());
    out.hflush();
    int actualRepl=out.getCurrentBlockReplication();
    assertTrue(f + " should be replicated to " + DATANODE_NUM+ " datanodes.",actualRepl == DATANODE_NUM);
    // Shrink both soft and hard lease limits, then wait long enough for the
    // hard limit to expire and lease recovery to complete the file.
    cluster.setLeasePeriod(leasePeriod,leasePeriod);
    try {
      Thread.sleep(5 * leasePeriod);
    }
 catch (    InterruptedException e) {
    }
    LocatedBlocks locations=dfs.dfs.getNamenode().getBlockLocations(f,0,Long.MAX_VALUE);
    assertEquals(1,locations.locatedBlockCount());
    LocatedBlock locatedblock=locations.getLocatedBlocks().get(0);
    // Read each replica's block file straight off the datanode's disk and
    // verify its contents; at least one replica must be present.
    int successcount=0;
    for (    DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
      DataNode datanode=cluster.getDataNode(datanodeinfo.getIpcPort());
      ExtendedBlock blk=locatedblock.getBlock();
      Block b=DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
      final File blockfile=DataNodeTestUtils.getFile(datanode,blk.getBlockPoolId(),b.getBlockId());
      System.out.println("blockfile=" + blockfile);
      if (blockfile != null) {
        BufferedReader in=new BufferedReader(new FileReader(blockfile));
        assertEquals("something",in.readLine());
        in.close();
        successcount++;
      }
    }
    System.out.println("successcount=" + successcount);
    assertTrue(successcount > 0);
  }
  finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
  System.out.println("testLeaseExpireHardLimit successful");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test that file leases are persisted across namenode restarts.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
  Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs=null;
  try {
    cluster.waitActive();
    fs=cluster.getFileSystem();
    final int nnport=cluster.getNameNodePort();
    // Open file1 and leave it unclosed so it holds a lease across the
    // restarts below.
    Path file1=new Path("/filestatus.dat");
    HdfsDataOutputStream stm=create(fs,file1,1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
    assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
    writeFile(stm,numBlocks * blockSize);
    stm.hflush();
    assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
    // Rename the open file; the lease must follow the new name.
    Path fileRenamed=new Path("/filestatusRenamed.dat");
    fs.rename(file1,fileRenamed);
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed);
    file1=fileRenamed;
    // Three more open files, two of them under a directory that is itself
    // renamed while they are open.
    Path file2=new Path("/filestatus2.dat");
    FSDataOutputStream stm2=createFile(fs,file2,1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
    Path file3=new Path("/user/home/fullpath.dat");
    FSDataOutputStream stm3=createFile(fs,file3,1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
    Path file4=new Path("/user/home/fullpath4.dat");
    FSDataOutputStream stm4=createFile(fs,file4,1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
    fs.mkdirs(new Path("/bin"));
    fs.rename(new Path("/user/home"),new Path("/bin"));
    Path file3new=new Path("/bin/home/fullpath.dat");
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to "+ file3new);
    Path file4new=new Path("/bin/home/fullpath4.dat");
    System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new);
    // Restart the NameNode twice (same port, no reformat) with idle waits in
    // between; leases must survive both restarts.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    }
 catch (    InterruptedException e) {
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    }
 catch (    InterruptedException e) {
    }
    cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // Point the still-open client streams at the post-rename paths so their
    // lease renewals reference the right files.
    DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream());
    dfstream.setTestFilename(file1.toString());
    dfstream=(DFSOutputStream)(stm3.getWrappedStream());
    dfstream.setTestFilename(file3new.toString());
    dfstream=(DFSOutputStream)(stm4.getWrappedStream());
    dfstream.setTestFilename(file4new.toString());
    // Finish writing and close everything; closes must succeed against the
    // restarted NameNode.
    byte[] buffer=AppendTestUtil.randomBytes(seed,1);
    stm.write(buffer);
    stm.close();
    stm2.write(buffer);
    stm2.close();
    stm3.close();
    stm4.close();
    // Verify the block counts recorded for the closed files.
    DFSClient client=fs.dfs;
    LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue("Error blocks were not cleaned up for file " + file1,locations.locatedBlockCount() == 3);
    locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue("Error blocks were not cleaned up for file " + file2,locations.locatedBlockCount() == 1);
  }
  finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Variant of the file-creation test where the client binds to a local
 * network interface. Interface names here are Linux-specific ("lo" for
 * loopback), so the test is skipped on other platforms.
 */
@Test
public void testFileCreationSetLocalInterface() throws IOException {
  // Only run on Linux, where the loopback device is named "lo".
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));

  // Binding to the loopback interface must work.
  checkFileCreation("lo", false);

  // A nonexistent interface must be rejected with UnknownHostException
  // carrying the exact interface name.
  boolean rejected = false;
  try {
    checkFileCreation("bogus-interface", false);
  } catch (UnknownHostException e) {
    rejected = true;
    assertEquals("No such interface bogus-interface", e.getMessage());
  }
  if (!rejected) {
    fail("Able to specify a bogus interface");
  }
}

Class: org.apache.hadoop.hdfs.TestFileLengthOnClusterRestart

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the fileLength when we sync the file and restart the cluster while
 * the Datanodes have not yet reported to the Namenode.
 */
@Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  // Small block size so the 1030-byte write spans multiple blocks.
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  HdfsDataInputStream in=null;
  try {
    Path path=new Path("/tmp/TestFileLengthOnClusterRestart","test");
    DistributedFileSystem dfs=cluster.getFileSystem();
    FSDataOutputStream out=dfs.create(path);
    int fileLength=1030;
    // hsync (no close) so the length is durable but the file stays open.
    out.write(new byte[fileLength]);
    out.hsync();
    // After a NameNode restart with datanodes still up, the visible length
    // must equal what was hsync'ed.
    cluster.restartNameNode();
    cluster.waitActive();
    in=(HdfsDataInputStream)dfs.open(path,1024);
    Assert.assertEquals(fileLength,in.getVisibleLength());
    // Now restart the NameNode with all datanodes down: it stays in safe
    // mode and opening the file must fail with a safe-mode IOException.
    cluster.shutdownDataNodes();
    cluster.restartNameNode(false);
    verifyNNIsInSafeMode(dfs);
    try {
      in=(HdfsDataInputStream)dfs.open(path);
      Assert.fail("Expected IOException");
    }
 catch (    IOException e) {
      Assert.assertTrue(e.getLocalizedMessage().indexOf("Name node is in safe mode") >= 0);
    }
  }
  finally {
    if (null != in) {
      in.close();
    }
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestFileStatus

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the FileStatus obtained calling listStatus on a file: listing a
 * regular file yields exactly one entry describing that file, and the
 * FileContext API agrees with the FileSystem API.
 */
@Test
public void testListStatusOnFile() throws IOException {
  FileStatus[] listing = fs.listStatus(file1);
  // A file lists as exactly one entry: itself.
  assertEquals(1, listing.length);

  FileStatus entry = listing[0];
  assertFalse(file1 + " should be a file", entry.isDirectory());
  assertEquals(blockSize, entry.getBlockSize());
  assertEquals(1, entry.getReplication());
  assertEquals(fileSize, entry.getLen());
  assertEquals(
      file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(),
      entry.getPath().toString());

  // The FileContext view must agree with the FileSystem view.
  RemoteIterator itor = fc.listStatus(file1);
  entry = itor.next();
  assertEquals(listing[0], entry);
  assertFalse(file1 + " should be a file", entry.isDirectory());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the FileStatus obtained calling getFileStatus on a file
 */
@Test
public void testGetFileStatusOnFile() throws Exception {
  checkFile(fs, file1, 1);
  final FileStatus stat = fs.getFileStatus(file1);
  // A regular file must not report itself as a directory.
  assertFalse(file1 + " should be a file", stat.isDirectory());
  // Block size, replication and length must match the file's creation
  // parameters.
  assertEquals(blockSize, stat.getBlockSize());
  assertEquals(1, stat.getReplication());
  assertEquals(fileSize, stat.getLen());
  // The returned path is fully qualified against the filesystem URI.
  final String qualified =
      file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
  assertEquals(qualified, stat.getPath().toString());
}

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test getting a FileStatus object using a non-existant path
 */
@Test
public void testGetFileStatusOnNonExistantFileDir() throws IOException {
  final Path missing = new Path("/test/mkdirs");

  // listStatus via FileSystem must fail with FileNotFoundException.
  try {
    fs.listStatus(missing);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertEquals("File " + missing + " does not exist.", fe.getMessage());
  }

  // The FileContext API must behave identically.
  try {
    fc.listStatus(missing);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertEquals("File " + missing + " does not exist.", fe.getMessage());
  }

  // getFileStatus only guarantees a message prefix, not exact text.
  try {
    fs.getFileStatus(missing);
    fail("getFileStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertTrue("Exception doesn't indicate non-existant path",
        fe.getMessage().startsWith("File does not exist"));
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test calling getFileInfo directly on the client
 */
@Test
public void testGetFileInfo() throws IOException {
  // The root must exist and be a directory.
  final Path root = new Path("/");
  assertTrue("/ should be a directory", fs.getFileStatus(root).isDirectory());

  // A missing path yields null rather than an exception.
  HdfsFileStatus info = dfsClient.getFileInfo("/noSuchFile");
  assertEquals("Non-existant file should result in null", null, info);

  // Create a directory holding a single file, then verify the reported
  // child counts for both the directory and the file.
  final Path parentDir = new Path("/name1");
  final Path childFile = new Path("/name1/name2");
  assertTrue(fs.mkdirs(parentDir));
  FSDataOutputStream out = fs.create(childFile, false);
  out.close();
  info = dfsClient.getFileInfo(parentDir.toString());
  assertEquals(1, info.getChildrenNum());
  info = dfsClient.getFileInfo(childFile.toString());
  assertEquals(0, info.getChildrenNum());

  // Relative paths are rejected server-side with an "Invalid file name"
  // RemoteException.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name",
        re.toString().contains("Invalid file name"));
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test FileStatus objects obtained from a directory
 */
@Test public void testGetFileStatusOnDir() throws Exception {
  // Fresh empty directory: reported as a zero-length directory by both
  // getFileStatus and listStatus, via FileSystem and FileContext alike.
  Path dir=new Path("/test/mkdirs");
  assertTrue("mkdir failed",fs.mkdirs(dir));
  assertTrue("mkdir failed",fs.exists(dir));
  FileStatus status=fs.getFileStatus(dir);
  assertTrue(dir + " should be a directory",status.isDirectory());
  assertTrue(dir + " should be zero size ",status.getLen() == 0);
  assertEquals(dir.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString());
  FileStatus[] stats=fs.listStatus(dir);
  assertEquals(dir + " should be empty",0,stats.length);
  assertEquals(dir + " should be zero size ",0,fs.getContentSummary(dir).getLength());
  RemoteIterator itor=fc.listStatus(dir);
  assertFalse(dir + " should be empty",itor.hasNext());
  // Add one file and re-check its status and fully-qualified path.
  Path file2=new Path(dir,"filestatus2.dat");
  DFSTestUtil.createFile(fs,file2,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
  checkFile(fs,file2,1);
  status=fs.getFileStatus(file2);
  assertEquals(blockSize,status.getBlockSize());
  assertEquals(1,status.getReplication());
  file2=fs.makeQualified(file2);
  assertEquals(file2.toString(),status.getPath().toString());
  // Add a second file: the directory's content length is the sum of both.
  Path file3=new Path(dir,"filestatus3.dat");
  DFSTestUtil.createFile(fs,file3,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
  checkFile(fs,file3,1);
  file3=fs.makeQualified(file3);
  final int expected=blockSize / 2;
  assertEquals(dir + " size should be " + expected,expected,fs.getContentSummary(dir).getLength());
  // Listings come back in name order from both APIs.
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have two entries",2,stats.length);
  assertEquals(file2.toString(),stats[0].getPath().toString());
  assertEquals(file3.toString(),stats[1].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse("Unexpected addtional file",itor.hasNext());
  // A subdirectory sorts ahead of the two files.
  Path dir3=fs.makeQualified(new Path(dir,"dir3"));
  fs.mkdirs(dir3);
  dir3=fs.makeQualified(dir3);
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have three entries",3,stats.length);
  assertEquals(dir3.toString(),stats[0].getPath().toString());
  assertEquals(file2.toString(),stats[1].getPath().toString());
  assertEquals(file3.toString(),stats[2].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(dir3.toString(),itor.next().getPath().toString());
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse("Unexpected addtional file",itor.hasNext());
  // Two more subdirectories: name ordering still holds with five entries.
  Path dir4=fs.makeQualified(new Path(dir,"dir4"));
  fs.mkdirs(dir4);
  dir4=fs.makeQualified(dir4);
  Path dir5=fs.makeQualified(new Path(dir,"dir5"));
  fs.mkdirs(dir5);
  dir5=fs.makeQualified(dir5);
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have five entries",5,stats.length);
  assertEquals(dir3.toString(),stats[0].getPath().toString());
  assertEquals(dir4.toString(),stats[1].getPath().toString());
  assertEquals(dir5.toString(),stats[2].getPath().toString());
  assertEquals(file2.toString(),stats[3].getPath().toString());
  assertEquals(file3.toString(),stats[4].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(dir3.toString(),itor.next().getPath().toString());
  assertEquals(dir4.toString(),itor.next().getPath().toString());
  assertEquals(dir5.toString(),itor.next().getPath().toString());
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse(itor.hasNext());
  // Clean up the test tree.
  fs.delete(dir,true);
}

Class: org.apache.hadoop.hdfs.TestGetBlocks

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the datanodes returned by{@link ClientProtocol#getBlockLocations(String,long,long)} is correct
 * when stale nodes checking is enabled. Also test during the scenario when 1)
 * stale nodes checking is enabled, 2) a writing is going on, 3) a datanode
 * becomes stale happen simultaneously
 * @throws Exception
 */
@Test public void testReadSelectNonStaleDatanode() throws Exception {
  HdfsConfiguration conf=new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,true);
  // Large stale interval so nodes only become "stale" when the test forges
  // their last-update timestamps below.
  long staleInterval=30 * 1000 * 60;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,staleInterval);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build();
  cluster.waitActive();
  InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
  DFSClient client=new DFSClient(addr,conf);
  List nodeInfoList=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of datanodes",numDatanodes,nodeInfoList.size());
  FileSystem fileSys=cluster.getFileSystem();
  FSDataOutputStream stm=null;
  try {
    // Write 1.5 blocks without closing, so the last block stays under
    // construction for the second half of the test.
    final Path fileName=new Path("/file1");
    stm=fileSys.create(fileName,true,fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096),(short)3,blockSize);
    stm.write(new byte[(blockSize * 3) / 2]);
    stm.hflush();
    LocatedBlocks blocks=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
    DatanodeInfo[] nodes=blocks.get(0).getLocations();
    assertEquals(nodes.length,3);
    // Make the first returned datanode stale by stopping its heartbeat and
    // back-dating its last-update time past the stale interval.
    DataNode staleNode=null;
    DatanodeDescriptor staleNodeInfo=null;
    staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
    assertNotNull(staleNode);
    staleNodeInfo=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId());
    staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
    // Finished block: the stale node must now be ordered last.
    LocatedBlocks blocksAfterStale=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
    DatanodeInfo[] nodesAfterStale=blocksAfterStale.get(0).getLocations();
    assertEquals(nodesAfterStale.length,3);
    assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
    // Restore the node to non-stale state.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode,false);
    staleNodeInfo.setLastUpdate(Time.now());
    // Repeat the experiment on the under-construction last block.
    LocatedBlock lastBlock=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
    nodes=lastBlock.getLocations();
    assertEquals(nodes.length,3);
    staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
    assertNotNull(staleNode);
    cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
    // The stale node must again be ordered last for the UC block.
    LocatedBlock lastBlockAfterStale=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
    nodesAfterStale=lastBlockAfterStale.getLocations();
    assertEquals(nodesAfterStale.length,3);
    assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
  }
  finally {
    if (stm != null) {
      stm.close();
    }
    if (client != null) {
      client.close();
    }
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestHdfsAdmin

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  final HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fileSystem = null;
  try {
    fileSystem = FileSystem.get(conf);
    assertTrue(fileSystem.mkdirs(TEST_PATH));

    // Freshly created directory: both quotas unset (-1).
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota leaves the namespace quota untouched.
    admin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the namespace quota: now both are 10.
    admin.setQuota(TEST_PATH, 10);
    assertEquals(10, fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());

    // Each quota type clears independently as well.
    admin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());

    admin.clearQuota(TEST_PATH);
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fileSystem != null) {
      fileSystem.close();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestLease

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that LeaseRenewer instances are shared per-user: two DFSClients
 * created as the same UGI share one renewer, while clients created as
 * different users get distinct renewers. The NameNode protocol is mocked so
 * no cluster is needed.
 */
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
  final String[] groups=new String[]{"supergroup"};
  final UserGroupInformation[] ugi=new UserGroupInformation[3];
  for (int i=0; i < ugi.length; i++) {
    ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
  }
  // Stub getFileInfo and create on the mock ClientProtocol so stream
  // creation succeeds without a real NameNode.
  Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
  Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
  final Configuration conf=new Configuration();
  // Same user (ugi[0]) twice: both clients share one LeaseRenewer.
  final DFSClient c1=createDFSClientAs(ugi[0],conf);
  FSDataOutputStream out1=createFsOut(c1,"/out1");
  final DFSClient c2=createDFSClientAs(ugi[0],conf);
  FSDataOutputStream out2=createFsOut(c2,"/out2");
  Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
  // A different user (ugi[1]) gets a distinct renewer, shared among that
  // user's clients.
  final DFSClient c3=createDFSClientAs(ugi[1],conf);
  FSDataOutputStream out3=createFsOut(c3,"/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4=createDFSClientAs(ugi[1],conf);
  FSDataOutputStream out4=createFsOut(c4,"/out4");
  Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
  // A third user is distinct from both of the first two.
  final DFSClient c5=createDFSClientAs(ugi[2],conf);
  FSDataOutputStream out5=createFsOut(c5,"/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises client behavior when lease renewal fails: writes still work past
 * the soft limit, fail fatally past the hard limit (aborting the lease), and
 * reads plus new writes recover once renewal succeeds again.
 */
@Test public void testLeaseAbort() throws Exception {
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    cluster.waitActive();
    // Spy the NameNode RPC so renewLease() can be made to throw on demand.
    NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
    NamenodeProtocols spyNN=spy(preSpyNN);
    DFSClient dfs=new DFSClient(null,spyNN,conf,null);
    byte[] buf=new byte[1024];
    // A closed file "c" to read from, and an open file "d" to write to.
    FSDataOutputStream c_out=createFsOut(dfs,dirString + "c");
    c_out.write(buf,0,1024);
    c_out.close();
    DFSInputStream c_in=dfs.open(dirString + "c");
    FSDataOutputStream d_out=createFsOut(dfs,dirString + "d");
    // Make every lease renewal fail from now on.
    doThrow(new RemoteException(InvalidToken.class.getName(),"Your token is worthless")).when(spyNN).renewLease(anyString());
    LeaseRenewer originalRenewer=dfs.getLeaseRenewer();
    // Pretend the last successful renewal was just past the SOFT limit:
    // writes must still succeed.
    dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
    try {
      dfs.renewLease();
    }
 catch (    IOException e) {
    }
    try {
      d_out.write(buf,0,1024);
      LOG.info("Write worked beyond the soft limit as expected.");
    }
 catch (    IOException e) {
      Assert.fail("Write failed.");
    }
    // Now pretend renewal has been failing past the HARD limit: the client
    // aborts its lease and subsequent writes must fail.
    dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
    dfs.renewLease();
    try {
      d_out.write(buf,0,1024);
      d_out.close();
      Assert.fail("Write did not fail even after the fatal lease renewal failure");
    }
 catch (    IOException e) {
      LOG.info("Write failed as expected. ",e);
    }
    // After the abort the renewer should have emptied out.
    Thread.sleep(1000);
    Assert.assertTrue(originalRenewer.isEmpty());
    // Let renewals succeed again: reads and fresh writes must both work.
    doNothing().when(spyNN).renewLease(anyString());
    try {
      int num=c_in.read(buf,0,1);
      if (num != 1) {
        Assert.fail("Failed to read 1 byte");
      }
      c_in.close();
    }
 catch (    IOException e) {
      LOG.error("Read failed with ",e);
      Assert.fail("Read after lease renewal failure failed");
    }
    try {
      c_out=createFsOut(dfs,dirString + "c");
      c_out.write(buf,0,1024);
      c_out.close();
    }
 catch (    IOException e) {
      LOG.error("Write failed with ",e);
      Assert.fail("Write failed");
    }
  }
  finally {
    cluster.shutdown();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts. This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test public void testLeaseAfterRenameAndRecreate() throws Exception {
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path path1=new Path("/test-file");
    final String contents1="contents1";
    final Path path2=new Path("/test-file-new-location");
    final String contents2="contents2";
    // Open path1 for write and leave it open (lease held on its inode).
    FileSystem fs=cluster.getFileSystem();
    FSDataOutputStream out1=fs.create(path1);
    out1.writeBytes(contents1);
    Assert.assertTrue(hasLease(cluster,path1));
    Assert.assertEquals(1,leaseCount(cluster));
    // From a second client: rename the open file away, then create a brand
    // new file at the old location. No lease conflict may occur because
    // leases track inodes, not paths.
    DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
    fs2.rename(path1,path2);
    FSDataOutputStream out2=fs2.create(path1);
    out2.writeBytes(contents2);
    out2.close();
    // The original writer's lease followed the rename to path2.
    Assert.assertTrue(hasLease(cluster,path2));
    out1.close();
    // Each path now holds the contents written by its own stream.
    DistributedFileSystem fs3=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
    Assert.assertEquals(contents1,DFSTestUtil.readFile(fs3,path2));
    Assert.assertEquals(contents2,DFSTestUtil.readFile(fs3,path1));
  }
  finally {
    cluster.shutdown();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises lease tracking across renames: a file with an open output stream
// is renamed (file moved into a dir, parent dir renamed, then dir renames
// using Options.Rename.OVERWRITE), and after every step the lease must
// follow the file to its new path while the total lease count stays at 1.
// NOTE(review): in the third step the message on
// Assert.assertFalse("no lease for " + pRenamed, hasLease(...)) reads
// inverted compared with the sibling checks ("has lease for ..."); the
// asserted condition itself is consistent -- message-only inconsistency.
@Test public void testLeaseAfterRename() throws Exception { MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { Path p=new Path("/test-file"); Path d=new Path("/test-d"); Path d2=new Path("/test-d-other"); FileSystem fs=cluster.getFileSystem(); FSDataOutputStream out=fs.create(p); out.writeBytes("something"); Assert.assertTrue(hasLease(cluster,p)); Assert.assertEquals(1,leaseCount(cluster)); DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf()); LOG.info("DMS: rename file into dir"); Path pRenamed=new Path(d,p.getName()); fs2.mkdirs(d); fs2.rename(p,pRenamed); Assert.assertFalse(p + " exists",fs2.exists(p)); Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed)); Assert.assertFalse("has lease for " + p,hasLease(cluster,p)); Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertEquals(1,leaseCount(cluster)); LOG.info("DMS: rename parent dir"); Path pRenamedAgain=new Path(d2,pRenamed.getName()); fs2.rename(d,d2); Assert.assertFalse(d + " exists",fs2.exists(d)); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d2 + " not found",fs2.exists(d2)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); LOG.info("DMS: rename parent again"); pRenamed=pRenamedAgain; pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName()); fs2.mkdirs(d); fs2.rename(d2,d); Assert.assertFalse(d2 + " exists",fs2.exists(d2)); Assert.assertFalse("no lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d + " not found",fs2.exists(d)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); pRenamed=pRenamedAgain; 
pRenamedAgain=new Path(d2,p.getName()); fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE); Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent())); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d2 + " not found",fs2.exists(d2)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); pRenamed=pRenamedAgain; pRenamedAgain=new Path(d,p.getName()); fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE); Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent())); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d + " not found",fs2.exists(d)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); out.close(); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestLeaseRecovery

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Flow: create a replicated file, locate its last block on every datanode,
// re-open it via append() to take the lease, wait for lease recovery, then
// verify each datanode's stored replica agrees on block id, length and
// generation stamp. Finally checks that lease recovery does NOT run while
// the namenode is in safe mode (the lease count stays at 1).
/** * The following test first creates a file with a few blocks. * It randomly truncates the replica of the last block stored in each datanode. * Finally, it triggers block synchronization to synchronize all stored block. */ @Test public void testBlockSynchronization() throws Exception { final int ORG_FILE_SIZE=3000; Configuration conf=new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build(); cluster.waitActive(); DistributedFileSystem dfs=cluster.getFileSystem(); String filestr="/foo"; Path filepath=new Path(filestr); DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L); assertTrue(dfs.exists(filepath)); DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM); LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr); DatanodeInfo[] datanodeinfos=locatedblock.getLocations(); assertEquals(REPLICATION_NUM,datanodeinfos.length); DataNode[] datanodes=new DataNode[REPLICATION_NUM]; for (int i=0; i < REPLICATION_NUM; i++) { datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort()); assertTrue(datanodes[i] != null); } ExtendedBlock lastblock=locatedblock.getBlock(); DataNode.LOG.info("newblocks=" + lastblock); for (int i=0; i < REPLICATION_NUM; i++) { checkMetaInfo(lastblock,datanodes[i]); } DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName); cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName); waitLeaseRecovery(cluster); Block[] updatedmetainfo=new Block[REPLICATION_NUM]; long oldSize=lastblock.getNumBytes(); lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock(); long currentGS=lastblock.getGenerationStamp(); for (int i=0; i < REPLICATION_NUM; i++) { updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId()); 
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId()); assertEquals(oldSize,updatedmetainfo[i].getNumBytes()); assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp()); } System.out.println("Testing that lease recovery cannot happen during safemode."); filestr="/foo.safemode"; filepath=new Path(filestr); dfs.create(filepath,(short)1); cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false); assertTrue(dfs.dfs.exists(filestr)); DFSTestUtil.waitReplication(dfs,filepath,(short)1); waitLeaseRecovery(cluster); LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem()); assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1); cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestLeaseRecovery2

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// After interrupting the client's lease renewer and shrinking the hard-limit
// period, this polls until the file's last block is no longer under
// construction (recovery finished), checks the recovered length matches what
// was hflush()ed, and expects any further write/close on the stale stream to
// fail with an IOException (the expected exception is only printed).
/** * This test makes the client does not renew its lease and also * set the hard lease expiration period to be short 1s. Thus triggering * lease expiration to happen while the client is still alive. * The test makes sure that the lease recovery completes and the client * fails if it continues to write to the file. * @throws Exception */ @Test public void testHardLeaseRecovery() throws Exception { String filestr="/hardLeaseRecovery"; AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath=new Path(filestr); FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); int size=AppendTestUtil.nextInt(FILE_SIZE); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer,0,size); AppendTestUtil.LOG.info("hflush"); stm.hflush(); AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD); LocatedBlocks locatedBlocks; do { Thread.sleep(SHORT_LEASE_PERIOD); locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size); } while (locatedBlocks.isUnderConstruction()); assertEquals(size,locatedBlocks.getFileLength()); try { stm.write('b'); stm.close(); fail("Writer thread should have been killed"); } catch ( IOException e) { e.printStackTrace(); } AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr); }

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// A second (fake) user retries create() on the same path; once soft-limit
// lease recovery completes, create() fails with FileAlreadyExistsException
// (success criterion). AlreadyBeingCreatedException means recovery is still
// in progress, so the loop sleeps 5s and retries, up to 10 times. The file
// length and contents are then validated against what was hflush()ed.
/** * This test makes the client does not renew its lease and also * set the soft lease expiration period to be short 1s. Thus triggering * soft lease expiration to happen immediately by having another client * trying to create the same file. * The test makes sure that the lease recovery completes. * @throws Exception */ @Test public void testSoftLeaseRecovery() throws Exception { Map u2g_map=new HashMap(1); u2g_map.put(fakeUsername,new String[]{fakeGroup}); DFSTestUtil.updateConfWithFakeGroupMapping(conf,u2g_map); cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,HdfsConstants.LEASE_HARDLIMIT_PERIOD); String filestr="/foo" + AppendTestUtil.nextInt(); AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath=new Path(filestr); FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); int size=AppendTestUtil.nextInt(FILE_SIZE); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer,0,size); AppendTestUtil.LOG.info("hflush"); stm.hflush(); AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); cluster.setLeasePeriod(SHORT_LEASE_PERIOD,LONG_LEASE_PERIOD); { UserGroupInformation ugi=UserGroupInformation.createUserForTesting(fakeUsername,new String[]{fakeGroup}); FileSystem dfs2=DFSTestUtil.getFileSystemAs(ugi,conf); boolean done=false; for (int i=0; i < 10 && !done; i++) { AppendTestUtil.LOG.info("i=" + i); try { dfs2.create(filepath,false,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE); fail("Creation of an existing file should never succeed."); } catch ( FileAlreadyExistsException ex) { done=true; } catch ( AlreadyBeingCreatedException ex) { AppendTestUtil.LOG.info("GOOD! 
got " + ex.getMessage()); } catch ( IOException ioe) { AppendTestUtil.LOG.warn("UNEXPECTED IOException",ioe); } if (!done) { AppendTestUtil.LOG.info("sleep " + 5000 + "ms"); try { Thread.sleep(5000); } catch ( InterruptedException e) { } } } assertTrue(done); } AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "+ "Validating its contents now..."); long fileSize=dfs.getFileStatus(filepath).getLen(); assertTrue("File should be " + size + " bytes, but is actually "+ " found to be "+ fileSize+ " bytes",fileSize == size); AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr); }

Class: org.apache.hadoop.hdfs.TestLeaseRenewer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the lease renewer daemon's lifecycle and thread name: it is not
 * running before any file is registered, starts once a stream is put,
 * reports the expected "LeaseRenewer:user@uri" daemon name, and shuts
 * itself down (within a bounded wait) after the last file is closed.
 */
@Test
public void testThreadName() throws Exception {
  final DFSOutputStream stream = Mockito.mock(DFSOutputStream.class);
  final long inodeId = 789L;
  Assert.assertFalse("Renewer not initially running", renewer.isRunning());

  // Registering a stream must start the renewer daemon.
  renewer.put(inodeId, stream, MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running", renewer.isRunning());
  final String name = renewer.getDaemonName();
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", name);

  // Closing the only file and marking the renewer empty should make the
  // daemon exit on its own; poll for up to 5 seconds.
  renewer.closeFile(inodeId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());
  final long deadline = Time.now() + 5000;
  while (renewer.isRunning() && Time.now() < deadline) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}

Class: org.apache.hadoop.hdfs.TestListFilesInFileContext

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test when input path is a file: both the recursive and the non-recursive
 * listFiles() must yield exactly that one file, with the expected length,
 * fully-qualified path, and a single block location.
 */
@Test
public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);
  // A listing rooted at a plain file is identical whether or not recursion
  // is requested; check both modes with the same expectations.
  for (boolean recursive : new boolean[] {true, false}) {
    RemoteIterator itor = fc.util().listFiles(FILE1, recursive);
    LocatedFileStatus stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fc.makeQualified(FILE1), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listFiles() on directories: an empty dir yields nothing (recursive or
// not); a file placed in a subdirectory appears in both recursive and
// non-recursive listings of that subdirectory; with FILE1..FILE3 present,
// the recursive listing of TEST_DIR returns FILE2, FILE3, FILE1 in that
// order, while the non-recursive listing returns only the direct child
// FILE1. NOTE(review): the pinned iteration order reflects the underlying
// listing implementation -- deliberate, but brittle if that order changes.
/** * Test when input path is a directory */ @Test public void testDirectory() throws IOException { fc.mkdir(DIR1,FsPermission.getDefault(),true); RemoteIterator itor=fc.util().listFiles(DIR1,true); assertFalse(itor.hasNext()); itor=fc.util().listFiles(DIR1,false); assertFalse(itor.hasNext()); writeFile(fc,FILE2,FILE_LEN); itor=fc.util().listFiles(DIR1,true); LocatedFileStatus stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fc.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); itor=fc.util().listFiles(DIR1,false); stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fc.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); writeFile(fc,FILE1,FILE_LEN); writeFile(fc,FILE3,FILE_LEN); itor=fc.util().listFiles(TEST_DIR,true); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE2),stat.getPath()); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE3),stat.getPath()); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); itor=fc.util().listFiles(TEST_DIR,false); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listFiles() must follow symlinks: dir4 contains a symlink (dir5) to DIR1
// and a symlink (file4) to FILE1. The recursive listing resolves through
// both links (yielding FILE2, FILE3, then FILE1 at their qualified target
// paths), while the non-recursive listing resolves only the direct file
// symlink (FILE1).
/** * Test when input patch has a symbolic links as its children */ @Test public void testSymbolicLinks() throws IOException { writeFile(fc,FILE1,FILE_LEN); writeFile(fc,FILE2,FILE_LEN); writeFile(fc,FILE3,FILE_LEN); Path dir4=new Path(TEST_DIR,"dir4"); Path dir5=new Path(dir4,"dir5"); Path file4=new Path(dir4,"file4"); fc.createSymlink(DIR1,dir5,true); fc.createSymlink(FILE1,file4,true); RemoteIterator itor=fc.util().listFiles(dir4,true); LocatedFileStatus stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE2),stat.getPath()); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE3),stat.getPath()); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); itor=fc.util().listFiles(dir4,false); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fc.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); }

Class: org.apache.hadoop.hdfs.TestLocalDFS

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// get/set of the DFS working directory: a file created via a relative path
// must materialize under the current working dir, both for an absolute
// working dir (/somewhere) and for a relative one ("else", which resolves
// to /somewhere/else); getHomeDirectory() must be /user/<current user>.
/** * Tests get/set working directory in DFS. */ @Test(timeout=20000) public void testWorkingDirectory() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); FileSystem fileSys=cluster.getFileSystem(); try { Path orig_path=fileSys.getWorkingDirectory(); assertTrue(orig_path.isAbsolute()); Path file1=new Path("somewhat/random.txt"); writeFile(fileSys,file1); assertTrue(fileSys.exists(new Path(orig_path,file1.toString()))); fileSys.delete(file1,true); Path subdir1=new Path("/somewhere"); fileSys.setWorkingDirectory(subdir1); writeFile(fileSys,file1); cleanupFile(fileSys,new Path(subdir1,file1.toString())); Path subdir2=new Path("else"); fileSys.setWorkingDirectory(subdir2); writeFile(fileSys,file1); readFile(fileSys,file1); cleanupFile(fileSys,new Path(new Path(subdir1,subdir2.toString()),file1.toString())); Path home=fileSys.makeQualified(new Path("/user/" + getUserName(fileSys))); Path fsHome=fileSys.getHomeDirectory(); assertEquals(home,fsHome); } finally { fileSys.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestMiniDFSCluster

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Bring up two clusters rooted at different base directories and assert
 * that their data directories do not collide.
 * @throws Throwable on a failure
 */
@Test(timeout = 100000)
public void testDualClusters() throws Throwable {
  final File base2 = new File(testDataPath, CLUSTER_2);
  final File base3 = new File(testDataPath, CLUSTER_3);
  final Configuration conf = new HdfsConfiguration();
  final String c2Path = base2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
  final MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster cluster3 = null;
  try {
    final String dataDir2 = cluster2.getDataDirectory();
    assertEquals(new File(c2Path + "/data"), new File(dataDir2));
    // Re-point the base dir and start a second, independent cluster.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, base3.getAbsolutePath());
    cluster3 = new MiniDFSCluster.Builder(conf).build();
    final String dataDir3 = cluster3.getDataDirectory();
    assertTrue("Clusters are bound to the same directory: " + dataDir2,
        !dataDir2.equals(dataDir3));
  } finally {
    MiniDFSCluster.shutdownCluster(cluster3);
    MiniDFSCluster.shutdownCluster(cluster2);
  }
}

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * MiniDFSCluster should not clobber dfs.datanode.hostname if requested:
 * a cluster built with checkDataNodeHostConfig(true) must report the
 * configured hostname ("MYHOST") on its single datanode.
 */
@Test(timeout = 100000)
public void testClusterSetDatanodeHostname() throws Throwable {
  // This behavior is only exercised on Linux; skip elsewhere.
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
  final File baseDir = new File(testDataPath, CLUSTER_5);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  final MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .checkDataNodeHostConfig(true)
      .build();
  try {
    assertEquals("DataNode hostname config not respected", "MYHOST",
        cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  } finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}

Class: org.apache.hadoop.hdfs.TestMissingBlocksAlert

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Corrupts a replica of one file's first block, reads the file to trigger
// corruption detection, then polls dfs.getMissingBlocksCount() and checks
// the under-replicated counters plus the NameNodeInfo JMX attribute
// "NumberOfMissingBlocks", both before and after deleting the corrupt file.
// The replication interval is set to 0 and the block size to fileLen/2 so
// each file spans exactly two blocks. NOTE(review): the busy-wait loops
// rely on the counters eventually converging; there is no explicit timeout.
@Test public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException { MiniDFSCluster cluster=null; try { Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,0); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10); int fileLen=10 * 1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,fileLen / 2); cluster=new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); final BlockManager bm=cluster.getNamesystem().getBlockManager(); DistributedFileSystem dfs=cluster.getFileSystem(); DFSTestUtil.createFile(dfs,new Path("/testMissingBlocksAlert/file1"),fileLen,(short)3,0); Path corruptFile=new Path("/testMissingBlocks/corruptFile"); DFSTestUtil.createFile(dfs,corruptFile,fileLen,(short)3,0); ExtendedBlock block=DFSTestUtil.getFirstBlock(dfs,corruptFile); assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0)); FSDataInputStream in=dfs.open(corruptFile); try { in.readFully(new byte[fileLen]); } catch ( ChecksumException ignored) { } in.close(); LOG.info("Waiting for missing blocks count to increase..."); while (dfs.getMissingBlocksCount() <= 0) { Thread.sleep(100); } assertTrue(dfs.getMissingBlocksCount() == 1); assertEquals(4,dfs.getUnderReplicatedBlocksCount()); assertEquals(3,bm.getUnderReplicatedNotMissingBlocks()); MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"); Assert.assertEquals(1,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks")); dfs.delete(corruptFile,true); LOG.info("Waiting for missing blocks count to be zero..."); while (dfs.getMissingBlocksCount() > 0) { Thread.sleep(100); } assertEquals(2,dfs.getUnderReplicatedBlocksCount()); assertEquals(2,bm.getUnderReplicatedNotMissingBlocks()); 
Assert.assertEquals(0,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks")); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestModTime

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime
 * for OP_CLOSE when reading from the edit log: an mtime advanced by a
 * delayed close() must survive a NameNode restart.
 */
@Test
public void testModTimePersistsAfterRestart() throws IOException {
  final long sleepTime = 10;
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    final Path testPath = new Path("/test");

    // Creating the file stamps an initial mtime.
    OutputStream out = fs.create(testPath);
    long mtimeAtCreate = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(mtimeAtCreate > 0);

    // Closing after a delay must advance the mtime (the OP_CLOSE edit).
    ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
    out.close();
    long mtimeAtClose = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(mtimeAtClose >= mtimeAtCreate + sleepTime);

    // The close-time mtime must survive an edit-log replay on restart.
    cluster.restartNameNode();
    long mtimeAfterRestart = fs.getFileStatus(testPath).getModificationTime();
    assertEquals(mtimeAtClose, mtimeAfterRestart);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Modification-time semantics: creating a file under a directory updates
// the directory's mtime; renaming a file preserves the file's own mtime but
// bumps both the source and destination directory mtimes; deleting a file
// bumps only its parent directory's mtime. On IOException the datanode
// report is printed before rethrowing, to aid diagnosis.
// NOTE(review): the assertTrue(a == b)/assertTrue(a != b) comparisons would
// read better as assertEquals/assertNotEquals and currently hide the actual
// values on failure; left untouched here.
/** * Tests modification time in DFS. */ @Test public void testModTime() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ",numDatanodes,info.length); FileSystem fileSys=cluster.getFileSystem(); int replicas=numDatanodes - 1; assertTrue(fileSys instanceof DistributedFileSystem); try { System.out.println("Creating testdir1 and testdir1/test1.dat."); Path dir1=new Path("testdir1"); Path file1=new Path(dir1,"test1.dat"); DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)replicas,seed); FileStatus stat=fileSys.getFileStatus(file1); long mtime1=stat.getModificationTime(); assertTrue(mtime1 != 0); stat=fileSys.getFileStatus(dir1); long mdir1=stat.getModificationTime(); System.out.println("Creating testdir1/test2.dat."); Path file2=new Path(dir1,"test2.dat"); DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,(short)replicas,seed); stat=fileSys.getFileStatus(file2); stat=fileSys.getFileStatus(dir1); assertTrue(stat.getModificationTime() >= mdir1); mdir1=stat.getModificationTime(); Path dir2=fileSys.makeQualified(new Path("testdir2/")); System.out.println("Creating testdir2 " + dir2); assertTrue(fileSys.mkdirs(dir2)); stat=fileSys.getFileStatus(dir2); long mdir2=stat.getModificationTime(); Path newfile=new Path(dir2,"testnew.dat"); System.out.println("Moving " + file1 + " to "+ newfile); fileSys.rename(file1,newfile); stat=fileSys.getFileStatus(newfile); assertTrue(stat.getModificationTime() == mtime1); stat=fileSys.getFileStatus(dir1); assertTrue(stat.getModificationTime() != mdir1); mdir1=stat.getModificationTime(); stat=fileSys.getFileStatus(dir2); 
assertTrue(stat.getModificationTime() != mdir2); mdir2=stat.getModificationTime(); System.out.println("Deleting testdir2/testnew.dat."); assertTrue(fileSys.delete(newfile,true)); stat=fileSys.getFileStatus(dir1); assertTrue(stat.getModificationTime() == mdir1); stat=fileSys.getFileStatus(dir2); assertTrue(stat.getModificationTime() != mdir2); mdir2=stat.getModificationTime(); cleanupFile(fileSys,file2); cleanupFile(fileSys,dir1); cleanupFile(fileSys,dir2); } catch ( IOException e) { info=client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { fileSys.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestParallelShortCircuitRead

TestInitializer AssumptionSetter HybridVerifier 
// Skip (rather than fail) every test in this class when the native domain
// socket support could not be loaded on this platform.
@Before public void before(){ Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null)); }

Class: org.apache.hadoop.hdfs.TestParallelShortCircuitReadNoChecksum

TestInitializer AssumptionSetter HybridVerifier 
// Skip (rather than fail) every test in this class when the native domain
// socket support could not be loaded on this platform.
@Before public void before(){ Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null)); }

Class: org.apache.hadoop.hdfs.TestParallelShortCircuitReadUnCached

TestInitializer AssumptionSetter HybridVerifier 
// Skip (rather than fail) every test in this class when the native domain
// socket support could not be loaded on this platform.
@Before public void before(){ Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null)); }

Class: org.apache.hadoop.hdfs.TestParallelUnixDomainRead

TestInitializer AssumptionSetter HybridVerifier 
// Skip (rather than fail) every test in this class when the native domain
// socket support could not be loaded on this platform.
@Before public void before(){ Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null)); }

Class: org.apache.hadoop.hdfs.TestPeerCache

IterativeVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Fills the cache to CAPACITY, inserts one more peer, and verifies
 * eviction: the oldest entry (index 0) is gone while entries 1..CAPACITY-1
 * are still retrievable, identical to what was inserted, and still open.
 * Entry CAPACITY is never fetched, so exactly one peer remains cached at
 * the end.
 *
 * Fix over previous version: the negated {@code assertTrue(!cond)} is
 * replaced with the idiomatic {@code assertFalse(cond)}.
 */
@Test
public void testEviction() throws Exception {
  final int CAPACITY = 3;
  PeerCache cache = new PeerCache(CAPACITY, 100000);
  DatanodeID dnIds[] = new DatanodeID[CAPACITY + 1];
  FakePeer peers[] = new FakePeer[CAPACITY + 1];
  for (int i = 0; i < dnIds.length; ++i) {
    dnIds[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i,
        "fake_datanode_id_" + i, 100, 101, 102, 103);
    peers[i] = new FakePeer(dnIds[i], false);
  }
  for (int i = 0; i < CAPACITY; ++i) {
    cache.put(dnIds[i], peers[i]);
  }
  assertEquals(CAPACITY, cache.size());
  // One insert beyond capacity keeps the size constant...
  cache.put(dnIds[CAPACITY], peers[CAPACITY]);
  assertEquals(CAPACITY, cache.size());
  // ...because the least-recently inserted entry (index 0) was evicted.
  assertSame(null, cache.get(dnIds[0], false));
  for (int i = 1; i < CAPACITY; ++i) {
    Peer peer = cache.get(dnIds[i], false);
    assertSame(peers[i], peer);
    assertFalse(peer.isClosed());
    peer.close();
  }
  // Only the never-fetched entry at index CAPACITY remains.
  assertEquals(1, cache.size());
  cache.close();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Caches CAPACITY peers under one DatanodeID, where only the last one is
// constructed with a domain socket. get(dnId, true) must return exactly
// that domain-socket peer; a second domain-socket request finds none
// (null); the remaining peers are then drained via get(dnId, false), each
// non-null and still open, leaving the cache empty.
@Test public void testDomainSocketPeers() throws Exception { final int CAPACITY=3; PeerCache cache=new PeerCache(CAPACITY,100000); DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103); HashMultiset peers=HashMultiset.create(CAPACITY); for (int i=0; i < CAPACITY; ++i) { FakePeer peer=new FakePeer(dnId,i == CAPACITY - 1); peers.add(peer); cache.put(dnId,peer); } assertEquals(CAPACITY,cache.size()); Peer peer=cache.get(dnId,true); assertTrue(peer.getDomainSocket() != null); peers.remove(peer); peer=cache.get(dnId,true); assertTrue(peer == null); while (!peers.isEmpty()) { peer=cache.get(dnId,false); assertTrue(peer != null); assertTrue(!peer.isClosed()); peers.remove(peer); } assertEquals(0,cache.size()); cache.close(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Caching a single peer and fetching it back must return an equal, still
 * open peer and leave the cache empty (a get removes the entry).
 */
@Test
public void testAddAndRetrieve() throws Exception {
  final PeerCache cache = new PeerCache(3, 100000);
  final DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  final FakePeer peer = new FakePeer(dnId, false);
  cache.put(dnId, peer);
  assertTrue(!peer.isClosed());
  assertEquals(1, cache.size());
  // Retrieval hands the cached peer back and removes the entry.
  assertEquals(peer, cache.get(dnId, false));
  assertEquals(0, cache.size());
  cache.close();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Uses a very short expiry period (10 ms) and sleeps ~50x that, after which
// every cached peer must have been expired and closed by the cache.
// Timing-sensitive by design; the generous 50x multiplier is what keeps it
// stable. The second sleep presumably gives the cache's background expiry
// time to settle before close() -- TODO confirm against PeerCache
// internals.
@Test public void testExpiry() throws Exception { final int CAPACITY=3; final int EXPIRY_PERIOD=10; PeerCache cache=new PeerCache(CAPACITY,EXPIRY_PERIOD); DatanodeID dnIds[]=new DatanodeID[CAPACITY]; FakePeer peers[]=new FakePeer[CAPACITY]; for (int i=0; i < CAPACITY; ++i) { dnIds[i]=new DatanodeID("192.168.0.1","fakehostname_" + i,"fake_datanode_id",100,101,102,103); peers[i]=new FakePeer(dnIds[i],false); } for (int i=0; i < CAPACITY; ++i) { cache.put(dnIds[i],peers[i]); } Thread.sleep(EXPIRY_PERIOD * 50); assertEquals(0,cache.size()); for (int i=0; i < CAPACITY; ++i) { assertTrue(peers[i].isClosed()); } Thread.sleep(EXPIRY_PERIOD * 50); cache.close(); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Several peers cached under one DatanodeID must all be retrievable:
 * repeatedly getting by that key drains every inserted peer, each non-null
 * and still open, until the cache is empty.
 */
@Test
public void testMultiplePeersWithSameKey() throws Exception {
  final int CAPACITY = 3;
  final PeerCache cache = new PeerCache(CAPACITY, 100000);
  final DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  final HashMultiset peers = HashMultiset.create(CAPACITY);
  for (int i = 0; i < CAPACITY; ++i) {
    FakePeer inserted = new FakePeer(dnId, false);
    peers.add(inserted);
    cache.put(dnId, inserted);
  }
  assertEquals(CAPACITY, cache.size());
  // Drain: every get must hand back one of the peers we inserted.
  while (!peers.isEmpty()) {
    Peer fetched = cache.get(dnId, false);
    assertTrue(fetched != null);
    assertTrue(!fetched.isClosed());
    peers.remove(fetched);
  }
  assertEquals(0, cache.size());
  cache.close();
}

Class: org.apache.hadoop.hdfs.TestPersistBlocks

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Writes NUM_BLOCKS worth of data, waits until all but the last block are
// visible, abandons the last (under-construction) block via the namenode
// protocol, restarts the NameNode, and verifies the persisted file length
// shrank by exactly one block and that the surviving prefix matches
// DATA_BEFORE_RESTART.
@Test public void testRestartDfsWithAbandonedBlock() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0); MiniDFSCluster cluster=null; long len=0; FSDataOutputStream stream; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fs=cluster.getFileSystem(); stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE); stream.write(DATA_BEFORE_RESTART); stream.hflush(); while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) { FileStatus status=fs.getFileStatus(FILE_PATH); len=status.getLen(); Thread.sleep(100); } DFSClient dfsclient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs); HdfsFileStatus fileStatus=dfsclient.getNamenode().getFileInfo(FILE_NAME); LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(FILE_NAME,0,BLOCK_SIZE * NUM_BLOCKS); assertEquals(NUM_BLOCKS,blocks.getLocatedBlocks().size()); LocatedBlock b=blocks.getLastLocatedBlock(); dfsclient.getNamenode().abandonBlock(b.getBlock(),fileStatus.getFileId(),FILE_NAME,dfsclient.clientName); cluster.restartNameNode(); FileStatus status=fs.getFileStatus(FILE_PATH); assertTrue("Length incorrect: " + status.getLen(),status.getLen() == len - BLOCK_SIZE); FSDataInputStream readStream=fs.open(FILE_PATH); try { byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE]; IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length); byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE]; System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,expectedBuf.length); assertArrayEquals(expectedBuf,verifyBuf); } finally { IOUtils.closeStream(readStream); } } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestPipelines

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// After append()+write()+hflush(), every datanode in the cluster must hold
// a non-null replica of the file's block, and that replica must be in RBW
// (replica-being-written) state.
/** * Creates and closes a file of certain length. * Calls append to allow next write() operation to add to the end of it * After write() invocation, calls hflush() to make sure that data sunk through * the pipeline and check the state of the last block's replica. * It supposes to be in RBW state * @throws IOException in case of an error */ @Test public void pipeline_01() throws IOException { final String METHOD_NAME=GenericTestUtils.getMethodName(); if (LOG.isDebugEnabled()) { LOG.debug("Running " + METHOD_NAME); } Path filePath=new Path("/" + METHOD_NAME + ".dat"); DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong()); if (LOG.isDebugEnabled()) { LOG.debug("Invoking append but doing nothing otherwise..."); } FSDataOutputStream ofs=fs.append(filePath); ofs.writeBytes("Some more stuff to write"); ((DFSOutputStream)ofs.getWrappedStream()).hflush(); List lb=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_SIZE - 1,FILE_SIZE).getLocatedBlocks(); String bpid=cluster.getNamesystem().getBlockPoolId(); for ( DataNode dn : cluster.getDataNodes()) { Replica r=DataNodeTestUtils.fetchReplicaInfo(dn,bpid,lb.get(0).getBlock().getBlockId()); assertTrue("Replica on DN " + dn + " shouldn't be null",r != null); assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",HdfsServerConstants.ReplicaState.RBW,r.getState()); } ofs.close(); }

Class: org.apache.hadoop.hdfs.TestPread

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the hedged-read loop terminates after a bounded number of
 * iterations when reads from datanodes are artificially slowed and the first
 * fetch throws a ChecksumException: with two datanodes and the injected
 * faults, exactly 3 trips through the loop are expected.
 */
@Test
public void testHedgedReadLoopTooManyTimes() throws IOException {
  Configuration conf = new Configuration();
  int numHedgedReadPoolThreads = 5;
  final int hedgedReadTimeoutMillis = 50;
  conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      numHedgedReadPoolThreads);
  conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      hedgedReadTimeoutMillis);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  // Install a mocked fault injector shared by all DFS clients in this JVM.
  DFSClientFaultInjector.instance = Mockito.mock(DFSClientFaultInjector.class);
  DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
  final int sleepMs = 100;
  // Every datanode fetch sleeps past the hedged-read threshold, and the very
  // first fetch (guarded by the atomic counter) throws a ChecksumException.
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
      if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
        System.out.println("-------------- throw Checksum Exception");
        throw new ChecksumException("ChecksumException test", 100);
      }
      return null;
    }
  }).when(injector).fetchFromDatanodeException();
  // Additionally delay each datanode read so hedged requests actually race.
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      Thread.sleep(sleepMs * 2);
      return null;
    }
  }).when(injector).readFromDatanodeDelay();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSClient dfsClient = fileSys.getClient();
  FSDataOutputStream output = null;
  DFSInputStream input = null;
  String filename = "/hedgedReadMaxOut.dat";
  try {
    // Write a 3 x 64KB file, flushing between writes.
    Path file = new Path(filename);
    output = fileSys.create(file, (short) 2);
    byte[] data = new byte[64 * 1024];
    output.write(data);
    output.flush();
    output.write(data);
    output.flush();
    output.write(data);
    output.flush();
    output.close();
    byte[] buffer = new byte[64 * 1024];
    input = dfsClient.open(filename);
    input.read(0, buffer, 0, 1024);
    input.close();
    // The injected faults should drive exactly 3 iterations of the hedged
    // read loop; any more would indicate the loop spinning excessively.
    assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
  } catch (BlockMissingException e) {
    // BUG FIX: was assertTrue(false) — fail() with context is the idiomatic
    // way to flag an unexpected exception in JUnit.
    fail("Unexpected BlockMissingException during hedged read: " + e);
  } finally {
    Mockito.reset(injector);
    IOUtils.cleanup(null, input);
    IOUtils.cleanup(null, output);
    fileSys.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestQuota

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// A half-block file must be charged a full block against the space quota at
// allocation time: with a 3-block quota and 3x replication, one half-block
// file consumes half the quota, and a second identical file must trip
// QuotaExceededException. Content summaries are cross-checked via WebHDFS.
/** * Violate a space quota using files of size < 1 block. Test that block * allocation conservatively assumes that for quota checking the entire * space of the block is used. */ @Test public void testBlockAllocationAdjustsUsageConservatively() throws Exception { Configuration conf=new HdfsConfiguration(); final int BLOCK_SIZE=6 * 1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); DFSAdmin admin=new DFSAdmin(conf); final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf); try { Path dir=new Path("/test"); Path file1=new Path("/test/test1"); Path file2=new Path("/test/test2"); boolean exceededQuota=false; final int QUOTA_SIZE=3 * BLOCK_SIZE; final int FILE_SIZE=BLOCK_SIZE / 2; ContentSummary c; assertTrue(fs.mkdirs(dir)); runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString()); DFSTestUtil.createFile(fs,file1,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file1,(short)3); c=fs.getContentSummary(dir); checkContentSummary(c,webhdfs.getContentSummary(dir)); assertEquals("Quota is half consumed",QUOTA_SIZE / 2,c.getSpaceConsumed()); try { DFSTestUtil.createFile(fs,file2,FILE_SIZE,(short)3,1L); } catch ( QuotaExceededException e) { exceededQuota=true; } assertTrue("Quota not exceeded",exceededQuota); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Long, strictly ordered scenario over nested space quotas: each operation
// (create/rename/delete/append/setReplication) is immediately followed by
// content-summary assertions, so every statement's position matters.
// fileSpace = fileLen * replication is the per-file charge; quotas are set in
// multiples of it. The final getYieldCount() check depends on
// DFS_CONTENT_SUMMARY_LIMIT_KEY=2 forcing the summary computation to yield.
/** * Test HDFS operations that change disk space consumed by a directory tree. * namely create, rename, delete, append, and setReplication. * This is based on testNamespaceCommands() above. */ @Test public void testSpaceCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,"512"); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; try { int fileLen=1024; short replication=3; int fileSpace=fileLen * replication; assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30"))); final Path quotaDir1=new Path("/nqdir0/qdir1"); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,4 * fileSpace); ContentSummary c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceQuota(),4 * fileSpace); final Path quotaDir20=new Path("/nqdir0/qdir1/qdir20"); dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,6 * fileSpace); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceQuota(),6 * fileSpace); final Path quotaDir21=new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir21)); dfs.setQuota(quotaDir21,HdfsConstants.QUOTA_DONT_SET,2 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceQuota(),2 * fileSpace); Path tempPath=new Path(quotaDir21,"nqdir32"); assertTrue(dfs.mkdirs(tempPath)); DFSTestUtil.createFile(dfs,new Path(tempPath,"fileDir/file1"),fileLen,replication,0); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),fileSpace); boolean hasException=false; try { DFSTestUtil.createFile(dfs,new Path(quotaDir21,"nqdir33/file2"),2 * fileLen,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.delete(new 
Path(quotaDir21,"nqdir33"),true)); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),fileSpace); assertEquals(c.getSpaceQuota(),2 * fileSpace); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),0); Path dstPath=new Path(quotaDir20,"nqdir30"); Path srcPath=new Path(quotaDir21,"nqdir32"); assertTrue(dfs.rename(srcPath,dstPath)); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),fileSpace); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceConsumed(),fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); final Path file2=new Path(dstPath,"fileDir/file2"); int file2Len=2 * fileLen; DFSTestUtil.createFile(dfs,file2,file2Len,replication,0); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),3 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); hasException=false; try { assertFalse(dfs.rename(dstPath,srcPath)); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertFalse(dfs.exists(srcPath)); assertTrue(dfs.exists(dstPath)); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),3 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceQuota(),4 * fileSpace); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),3 * fileSpace); OutputStream out=dfs.append(file2); out.write(new byte[fileLen]); out.close(); file2Len+=fileLen; c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),4 * fileSpace); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,5 * fileSpace); out=dfs.append(file2); hasException=false; try { out.write(new byte[fileLen + 1024]); out.flush(); out.close(); } catch ( DSQuotaExceededException e) { hasException=true; IOUtils.closeStream(out); } assertTrue(hasException); file2Len+=fileLen; c=dfs.getContentSummary(dstPath); 
assertEquals(c.getSpaceConsumed(),5 * fileSpace); dfs.setReplication(file2,(short)(replication - 1)); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len); hasException=false; try { dfs.setReplication(file2,(short)(replication + 1)); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace); dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace); dfs.setReplication(file2,(short)(replication + 1)); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace + file2Len); final Path quotaDir2053=new Path("/hdfs-2053"); assertTrue(dfs.mkdirs(quotaDir2053)); final Path quotaDir2053_A=new Path(quotaDir2053,"A"); assertTrue(dfs.mkdirs(quotaDir2053_A)); final Path quotaDir2053_B=new Path(quotaDir2053,"B"); assertTrue(dfs.mkdirs(quotaDir2053_B)); final Path quotaDir2053_C=new Path(quotaDir2053,"C"); assertTrue(dfs.mkdirs(quotaDir2053_C)); int sizeFactorA=1; int sizeFactorB=2; int sizeFactorC=4; dfs.setQuota(quotaDir2053_C,HdfsConstants.QUOTA_DONT_SET,(sizeFactorC + 1) * fileSpace); c=dfs.getContentSummary(quotaDir2053_C); assertEquals(c.getSpaceQuota(),(sizeFactorC + 1) * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_A,"fileA"),sizeFactorA * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_A); assertEquals(c.getSpaceConsumed(),sizeFactorA * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_B,"fileB"),sizeFactorB * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_B); assertEquals(c.getSpaceConsumed(),sizeFactorB * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_C,"fileC"),sizeFactorC * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_C); assertEquals(c.getSpaceConsumed(),sizeFactorC * fileSpace); c=dfs.getContentSummary(quotaDir2053); 
assertEquals(c.getSpaceConsumed(),(sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace); assertEquals(20,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Accrues quota-accounting error across 59 small files (each < 1 block) and
// checks the 60th creation violates the space quota; also cross-checks the
// content summary via WebHDFS.
// NOTE(review): the assertTrue message below ("Default namespace quota
// expected as long max. ... But the value is :") appears split across a line
// break inside the string literal — looks like extraction damage from the
// original source; verify the literal is a single line in the real file.
/** * Like the previous test but create many files. This covers bugs where * the quota adjustment is incorrect but it takes many files to accrue * a big enough accounting error to violate the quota. */ @Test public void testMultipleFilesSmallerThanOneBlock() throws Exception { Configuration conf=new HdfsConfiguration(); final int BLOCK_SIZE=6 * 1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); DFSAdmin admin=new DFSAdmin(conf); final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf); try { long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem()); assertTrue("Default namespace quota expected as long max. 
But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE); Path dir=new Path("/test"); boolean exceededQuota=false; ContentSummary c; final int FILE_SIZE=1024; final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir); assertEquals(6 * 1024,fs.getDefaultBlockSize(dir)); assertEquals(192 * 1024,QUOTA_SIZE); assertTrue(fs.mkdirs(dir)); runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString()); for (int i=0; i < 59; i++) { Path file=new Path("/test/test" + i); DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file,(short)3); } c=fs.getContentSummary(dir); checkContentSummary(c,webhdfs.getContentSummary(dir)); assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed()); assertEquals("Invalid space consumed",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE)); try { Path file=new Path("/test/test59"); DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file,(short)3); } catch ( QuotaExceededException e) { exceededQuota=true; } assertTrue("Quota not exceeded",exceededQuota); assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Boundary values for setQuota: Long.MAX_VALUE - 1 is accepted for both
// namespace and space quotas; Long.MAX_VALUE is the QUOTA_DONT_SET sentinel
// (leaves the quota unchanged); Long.MAX_VALUE + 1 wraps negative and must
// raise IllegalArgumentException (the empty catch blocks are the expected
// path of those negative tests).
/** * Test limit cases for setting space quotas. */ @Test public void testMaxSpaceQuotas() throws Exception { final Configuration conf=new HdfsConfiguration(); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { final FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; final Path testFolder=new Path("/testFolder"); assertTrue(dfs.mkdirs(testFolder)); dfs.setQuota(testFolder,Long.MAX_VALUE - 1,10); ContentSummary c=dfs.getContentSummary(testFolder); assertTrue("Quota not set properly",c.getQuota() == Long.MAX_VALUE - 1); dfs.setQuota(testFolder,10,Long.MAX_VALUE - 1); c=dfs.getContentSummary(testFolder); assertTrue("Quota not set properly",c.getSpaceQuota() == Long.MAX_VALUE - 1); dfs.setQuota(testFolder,Long.MAX_VALUE,10); c=dfs.getContentSummary(testFolder); assertTrue("Quota should not have changed",c.getQuota() == 10); dfs.setQuota(testFolder,10,Long.MAX_VALUE); c=dfs.getContentSummary(testFolder); assertTrue("Quota should not have changed",c.getSpaceQuota() == 10); try { dfs.setQuota(testFolder,Long.MAX_VALUE + 1,10); fail("Exception not thrown"); } catch ( IllegalArgumentException e) { } try { dfs.setQuota(testFolder,10,Long.MAX_VALUE + 1); fail("Exception not thrown"); } catch ( IllegalArgumentException e) { } } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Strictly ordered namespace-quota scenario: every mkdirs/rename/delete is
// followed by directory-count and quota assertions, so statement order is
// load-bearing. Renames into a full quota directory are expected to throw
// (N)QuotaExceededException and leave the source intact. The final
// getYieldCount()==14 depends on DFS_CONTENT_SUMMARY_LIMIT_KEY=2 forcing the
// content-summary computation to yield its lock.
/** * Test commands that change the size of the name space: * mkdirs, rename, and delete */ @Test public void testNamespaceCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final DistributedFileSystem dfs=cluster.getFileSystem(); try { assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30"))); final Path quotaDir1=new Path("/nqdir0/qdir1"); dfs.setQuota(quotaDir1,6,HdfsConstants.QUOTA_DONT_SET); ContentSummary c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),6); final Path quotaDir2=new Path("/nqdir0/qdir1/qdir20"); dfs.setQuota(quotaDir2,7,HdfsConstants.QUOTA_DONT_SET); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); final Path quotaDir3=new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir3)); dfs.setQuota(quotaDir3,2,HdfsConstants.QUOTA_DONT_SET); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),1); assertEquals(c.getQuota(),2); Path tempPath=new Path(quotaDir3,"nqdir32"); assertTrue(dfs.mkdirs(tempPath)); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),2); tempPath=new Path(quotaDir3,"nqdir33"); boolean hasException=false; try { assertFalse(dfs.mkdirs(tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),2); tempPath=new Path(quotaDir2,"nqdir31"); assertTrue(dfs.mkdirs(tempPath)); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); tempPath=new Path(quotaDir2,"nqdir33"); hasException=false; 
try { assertFalse(dfs.mkdirs(tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); tempPath=new Path(quotaDir2,"nqdir30"); dfs.rename(new Path(quotaDir3,"nqdir32"),tempPath); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); hasException=false; try { assertFalse(dfs.rename(tempPath,quotaDir3)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.exists(tempPath)); assertFalse(dfs.exists(new Path(quotaDir3,"nqdir30"))); hasException=false; try { assertFalse(dfs.rename(tempPath,new Path(quotaDir3,"nqdir32"))); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.exists(tempPath)); assertFalse(dfs.exists(new Path(quotaDir3,"nqdir32"))); assertTrue(dfs.rename(tempPath,new Path("/nqdir0"))); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),6); assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33"))); hasException=false; try { assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"),tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.rename(quotaDir3,quotaDir2)); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),6); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),7); tempPath=new Path(quotaDir2,"qdir21"); c=dfs.getContentSummary(tempPath); assertEquals(c.getDirectoryCount(),1); assertEquals(c.getQuota(),2); dfs.delete(tempPath,true); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); 
c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),6); assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"),quotaDir2)); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),5); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); assertEquals(14,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// End-to-end DFSAdmin quota CLI scenario: setQuota/clrQuota/setSpaceQuota/
// clrSpaceQuota on directories, files, non-existent paths, the root, and as
// a non-superuser (expected to fail). The runCommand boolean argument is the
// "expect failure" flag. CAUTION: the String[] args array is mutated and
// reused across many invocations (args[0]/args[1]/args[2] are rewritten in
// place), so statement order is load-bearing throughout.
/** * Test quota related commands: * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count */ @Test public void testQuotaCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); final int DEFAULT_BLOCK_SIZE=512; conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; DFSAdmin admin=new DFSAdmin(conf); try { final int fileLen=1024; final short replication=5; final long spaceQuota=fileLen * replication * 15 / 8; final Path parent=new Path("/test"); assertTrue(dfs.mkdirs(parent)); String[] args=new String[]{"-setQuota","3",parent.toString()}; runCommand(admin,args,false); runCommand(admin,false,"-setSpaceQuota","2t",parent.toString()); assertEquals(2L << 40,dfs.getContentSummary(parent).getSpaceQuota()); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota),parent.toString()); final Path childDir0=new Path(parent,"data0"); assertTrue(dfs.mkdirs(childDir0)); final Path childFile0=new Path(parent,"datafile0"); DFSTestUtil.createFile(fs,childFile0,fileLen,replication,0); ContentSummary c=dfs.getContentSummary(parent); assertEquals(c.getFileCount() + c.getDirectoryCount(),3); assertEquals(c.getQuota(),3); assertEquals(c.getSpaceConsumed(),fileLen * replication); assertEquals(c.getSpaceQuota(),spaceQuota); c=dfs.getContentSummary(childDir0); assertEquals(c.getFileCount() + c.getDirectoryCount(),1); assertEquals(c.getQuota(),-1); c=dfs.getContentSummary(parent); assertEquals(c.getSpaceConsumed(),fileLen * replication); final Path childDir1=new Path(parent,"data1"); boolean hasException=false; try { assertFalse(dfs.mkdirs(childDir1)); } catch ( QuotaExceededException e) { hasException=true; } 
assertTrue(hasException); OutputStream fout; final Path childFile1=new Path(parent,"datafile1"); hasException=false; try { fout=dfs.create(childFile1); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); runCommand(admin,new String[]{"-clrQuota",parent.toString()},false); c=dfs.getContentSummary(parent); assertEquals(c.getQuota(),-1); assertEquals(c.getSpaceQuota(),spaceQuota); runCommand(admin,new String[]{"-clrQuota",childDir0.toString()},false); c=dfs.getContentSummary(childDir0); assertEquals(c.getQuota(),-1); fout=dfs.create(childFile1,replication); try { fout.write(new byte[fileLen]); fout.close(); Assert.fail(); } catch ( QuotaExceededException e) { IOUtils.closeStream(fout); } dfs.delete(childFile1,false); runCommand(admin,false,"-clrSpaceQuota",parent.toString()); c=dfs.getContentSummary(parent); assertEquals(c.getQuota(),-1); assertEquals(c.getSpaceQuota(),-1); DFSTestUtil.createFile(dfs,childFile1,fileLen,replication,0); args=new String[]{"-setQuota","1",parent.toString()}; runCommand(admin,args,false); runCommand(admin,false,"-setSpaceQuota",Integer.toString(fileLen),args[2]); args=new String[]{"-setQuota","1",childDir0.toString()}; runCommand(admin,args,false); hasException=false; try { assertFalse(dfs.mkdirs(new Path(childDir0,"in"))); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(childDir0); assertEquals(c.getDirectoryCount() + c.getFileCount(),1); assertEquals(c.getQuota(),1); Path nonExistentPath=new Path("/test1"); assertFalse(dfs.exists(nonExistentPath)); args=new String[]{"-setQuota","1",nonExistentPath.toString()}; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","1g",nonExistentPath.toString()); assertTrue(dfs.isFile(childFile0)); args[1]=childFile0.toString(); runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","1t",args[1]); args[0]="-clrQuota"; runCommand(admin,args,true); 
runCommand(admin,true,"-clrSpaceQuota",args[1]); args[1]=nonExistentPath.toString(); runCommand(admin,args,true); runCommand(admin,true,"-clrSpaceQuota",args[1]); args=new String[]{"-setQuota","0",parent.toString()}; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","0",args[2]); args[1]="-1"; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); args[1]=String.valueOf(Long.MAX_VALUE + 1L); runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); args[1]="33aa1.5"; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); runCommand(admin,true,"-setSpaceQuota",(Long.MAX_VALUE / 1024 / 1024 + 1024) + "m",args[2]); final String username="userxx"; UserGroupInformation ugi=UserGroupInformation.createUserForTesting(username,new String[]{"groupyy"}); final String[] args2=args.clone(); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { assertEquals("Not running as new user",username,UserGroupInformation.getCurrentUser().getShortUserName()); DFSAdmin userAdmin=new DFSAdmin(conf); args2[1]="100"; runCommand(userAdmin,args2,true); runCommand(userAdmin,true,"-setSpaceQuota","1g",args2[2]); String[] args3=new String[]{"-clrQuota",parent.toString()}; runCommand(userAdmin,args3,true); runCommand(userAdmin,true,"-clrSpaceQuota",args3[1]); return null; } } ); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-setQuota","1000000","/"); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-clrSpaceQuota","/"); runCommand(admin,new String[]{"-clrQuota",parent.toString()},false); runCommand(admin,false,"-clrSpaceQuota",parent.toString()); final Path childDir2=new Path(parent,"data2"); assertTrue(dfs.mkdirs(childDir2)); final Path childFile2=new Path(childDir2,"datafile2"); final Path childFile3=new Path(childDir2,"datafile3"); final long spaceQuota2=DEFAULT_BLOCK_SIZE * replication; final long 
fileLen2=DEFAULT_BLOCK_SIZE; runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString()); runCommand(admin,false,"-clrSpaceQuota",childDir2.toString()); DFSTestUtil.createFile(fs,childFile2,fileLen2,replication,0); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString()); hasException=false; try { DFSTestUtil.createFile(fs,childFile3,fileLen2,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); final Path childFile4=new Path("/","datafile2"); final Path childFile5=new Path("/","datafile3"); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-clrSpaceQuota","/"); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/"); runCommand(admin,false,"-clrSpaceQuota","/"); DFSTestUtil.createFile(fs,childFile4,fileLen2,replication,0); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/"); hasException=false; try { DFSTestUtil.createFile(fs,childFile5,fileLen2,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertEquals(4,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestReplaceDatanodeOnFailure

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// With a single datanode and replication 3, appends trigger the
// replace-datanode-on-failure policy: the first append succeeds (pipeline
// built at create time), but the second append cannot find a replacement
// datanode and is expected to fail with an IOException.
@Test public void testAppend() throws Exception { final Configuration conf=new HdfsConfiguration(); final short REPLICATION=(short)3; Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT,ReplaceDatanodeOnFailure.get(conf)); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { final DistributedFileSystem fs=cluster.getFileSystem(); final Path f=new Path(DIR,"testAppend"); { LOG.info("create an empty file " + f); fs.create(f,REPLICATION).close(); final FileStatus status=fs.getFileStatus(f); Assert.assertEquals(REPLICATION,status.getReplication()); Assert.assertEquals(0L,status.getLen()); } final byte[] bytes=new byte[1000]; { LOG.info("append " + bytes.length + " bytes to "+ f); final FSDataOutputStream out=fs.append(f); out.write(bytes); out.close(); final FileStatus status=fs.getFileStatus(f); Assert.assertEquals(REPLICATION,status.getReplication()); Assert.assertEquals(bytes.length,status.getLen()); } { LOG.info("append another " + bytes.length + " bytes to "+ f); try { final FSDataOutputStream out=fs.append(f); out.write(bytes); out.close(); Assert.fail(); } catch ( IOException ioe) { LOG.info("This exception is expected",ioe); } } } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestReplication

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Replication-retry scenario: create a 4-replica file, shut the cluster
// down, then on the first three datanode dirs delete one block replica and
// corrupt the other two on disk. On restart with 8 datanodes, a short
// pending-replication timeout, and a lowered safemode threshold, the
// namenode must detect the bad replicas and restore full replication —
// waitForBlockReplication blocks until that happens.
@Test public void testPendingReplicationRetry() throws IOException { MiniDFSCluster cluster=null; int numDataNodes=4; String testFile="/replication-test-file"; Path testPath=new Path(testFile); byte buffer[]=new byte[1024]; for (int i=0; i < buffer.length; i++) { buffer[i]='1'; } try { Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes)); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf); OutputStream out=cluster.getFileSystem().create(testPath); out.write(buffer); out.close(); waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1); ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(testFile,0,Long.MAX_VALUE).get(0).getBlock(); cluster.shutdown(); cluster=null; for (int i=0; i < 25; i++) { buffer[i]='0'; } int fileCount=0; for (int dnIndex=0; dnIndex < 3; dnIndex++) { File blockFile=MiniDFSCluster.getBlockFile(dnIndex,block); LOG.info("Checking for file " + blockFile); if (blockFile != null && blockFile.exists()) { if (fileCount == 0) { LOG.info("Deleting file " + blockFile); assertTrue(blockFile.delete()); } else { LOG.info("Corrupting file " + blockFile); long len=blockFile.length(); assertTrue(len > 50); RandomAccessFile blockOut=new RandomAccessFile(blockFile,"rw"); try { blockOut.seek(len / 3); blockOut.write(buffer,0,25); } finally { blockOut.close(); } } fileCount++; } } assertEquals(3,fileCount); LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs"); conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes)); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2)); conf.set("dfs.datanode.block.write.timeout.sec",Integer.toString(5)); conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.75f"); 
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build(); cluster.waitActive(); dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf); waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Corrupts the only replica of a block on disk, raises the replication
 * factor so the datanode must read the corrupt copy during transfer, and
 * verifies the namenode eventually marks the block corrupt with only the
 * single bad replica reported as a location.
 */
@Test
public void testBadBlockReportOnTransfer() throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  short replFactor = 1;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    dfsClient = new DFSClient(
        new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    // Create a single-replica file and corrupt its first block on disk.
    Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
    DFSTestUtil.createFile(fs, file1, 1024, replFactor, 0);
    DFSTestUtil.waitReplication(fs, file1, replFactor);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted);
    // Request a second replica: re-replication must read the corrupt copy,
    // which should cause the block to be reported bad.
    replFactor = 2;
    fs.setReplication(file1, replFactor);
    blocks = dfsClient.getNamenode()
        .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    // Poll until the namenode flags the block as corrupt.
    while (!blocks.get(0).isCorrupt()) {
      try {
        LOG.info("Waiting until block is marked as corrupt...");
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // Benign in this polling loop: just re-check immediately.
      }
      blocks = dfsClient.getNamenode()
          .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    }
    replicaCount = blocks.get(0).getLocations().length;
    // Only the original (corrupt) replica should remain as a location.
    assertEquals(1, replicaCount);
  } finally {
    // BUG FIX: shutdown() was previously a trailing statement, so the
    // mini-cluster leaked whenever an assertion above failed.
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestReservedRawPaths

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The /.reserved tree must be invisible to listStatus: both /.reserved and
 * /.reserved/.inodes throw FileNotFoundException, while /.reserved/raw
 * lists the raw view of files created beneath it.
 *
 * Fix vs. original: the assertEquals arguments were swapped
 * (message, actual, expected); the expected value (1) now precedes the
 * actual value so a failure reports expected/actual correctly.
 */
@Test(timeout = 120000)
public void testListDotReserved() throws Exception {
  // Create a file through the raw path so /.reserved/raw has one entry.
  final Path baseFileRaw = new Path("/.reserved/raw/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
  try {
    fs.listStatus(new Path("/.reserved"));
    fail("expected FNFE");
  } catch (FileNotFoundException e) {
    assertExceptionContains("/.reserved does not exist", e);
  }
  try {
    fs.listStatus(new Path("/.reserved/.inodes"));
    fail("expected FNFE");
  } catch (FileNotFoundException e) {
    assertExceptionContains("/.reserved/.inodes does not exist", e);
  }
  final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
  assertEquals("expected 1 entry", 1, fileStatuses.length);
  assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
}

Class: org.apache.hadoop.hdfs.TestRollingUpgrade

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * HA rolling upgrade: after PREPARE on the active NN (nn0), rolling an edit
 * log segment should cause the standby (nn1) to create a checkpoint; polls
 * the standby's NNStorage for an fsimage at (txid - 1) for up to ~5 seconds
 * and fails if none appears.
 * NOTE(review): the 5 x 1s polling window may be tight on slow hosts.
 */
@Test(timeout=300000) public void testCheckpoint() throws IOException, InterruptedException { final Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,1); MiniQJMHACluster cluster=null; final Path foo=new Path("/foo"); try { cluster=new MiniQJMHACluster.Builder(conf).build(); MiniDFSCluster dfsCluster=cluster.getDfsCluster(); dfsCluster.waitActive(); dfsCluster.transitionToActive(0); DistributedFileSystem dfs=dfsCluster.getFileSystem(0); RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); Assert.assertTrue(info.isStarted()); queryForPreparation(dfs); dfs.mkdirs(foo); long txid=dfs.rollEdits(); Assert.assertTrue(txid > 0); int retries=0; while (++retries < 5) { NNStorage storage=dfsCluster.getNamesystem(1).getFSImage().getStorage(); if (storage.getFsImageName(txid - 1) != null) { return; } Thread.sleep(1000); } Assert.fail("new checkpoint does not exist"); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rolling upgrade across NameNode restarts backed by a shared
 * MiniJournalCluster: PREPARE on the first NN, start a second NN from a
 * copied image and verify it sees the upgrade status and pre/post-upgrade
 * directories, confirm the status and namespace survive regular restarts,
 * that a "-upgrade" restart is rejected while a rolling upgrade is in
 * progress, and that FINALIZE keeps the same start time and all data.
 * NOTE(review): the MiniJournalCluster (mjc) is never shut down in this
 * method -- verify a surrounding @After hook releases it.
 */
@Test(timeout=30000) public void testRollingUpgradeWithQJM() throws Exception { String nnDirPrefix=MiniDFSCluster.getBaseDirectory() + "/nn/"; final File nn1Dir=new File(nnDirPrefix + "image1"); final File nn2Dir=new File(nnDirPrefix + "image2"); LOG.info("nn1Dir=" + nn1Dir); LOG.info("nn2Dir=" + nn2Dir); final Configuration conf=new HdfsConfiguration(); final MiniJournalCluster mjc=new MiniJournalCluster.Builder(conf).build(); setConf(conf,nn1Dir,mjc); { final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build(); cluster.shutdown(); } MiniDFSCluster cluster2=null; try { FileUtil.fullyDelete(nn2Dir); FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build(); final Path foo=new Path("/foo"); final Path bar=new Path("/bar"); final Path baz=new Path("/baz"); final RollingUpgradeInfo info1; { final DistributedFileSystem dfs=cluster.getFileSystem(); dfs.mkdirs(foo); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); info1=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); LOG.info("START\n" + info1); Assert.assertEquals(info1,dfs.rollingUpgrade(RollingUpgradeAction.QUERY)); dfs.mkdirs(bar); cluster.shutdown(); } final Configuration conf2=setConf(new Configuration(),nn2Dir,mjc); cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build(); final DistributedFileSystem dfs2=cluster2.getFileSystem(); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertFalse(dfs2.exists(baz)); Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); dfs2.mkdirs(baz); LOG.info("RESTART cluster 2"); cluster2.restartNameNode(); 
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); try { cluster2.restartNameNode("-upgrade"); } catch ( IOException e) { LOG.info("The exception is expected.",e); } LOG.info("RESTART cluster 2 again"); cluster2.restartNameNode(); Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); final RollingUpgradeInfo finalize=dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE); LOG.info("FINALIZE: " + finalize); Assert.assertEquals(info1.getStartTime(),finalize.getStartTime()); LOG.info("RESTART cluster 2 with regular startup option"); cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster2.restartNameNode(); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); } finally { if (cluster2 != null) cluster2.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * dfsadmin datanode upgrade controls: "-getDatanodeInfo" returns 0 against
 * a live datanode, "-shutdownDatanode &lt;addr&gt; upgrade" returns 0 and stops
 * it, after which "-getDatanodeInfo" returns -1.
 * NOTE(review): the fixed 2-second sleep while waiting for the datanode to
 * exit is a potential source of flakiness on loaded machines.
 */
@Test public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DFSAdmin dfsadmin=new DFSAdmin(conf); DataNode dn=cluster.getDataNodes().get(0); final String dnAddr=dn.getDatanodeId().getIpcAddr(false); final String[] args1={"-getDatanodeInfo",dnAddr}; Assert.assertEquals(0,dfsadmin.run(args1)); final String[] args2={"-shutdownDatanode",dnAddr,"upgrade"}; Assert.assertEquals(0,dfsadmin.run(args2)); Thread.sleep(2000); Assert.assertFalse("DataNode should exit",dn.isDatanodeUp()); Assert.assertEquals(-1,dfsadmin.run(args1)); } finally { if (cluster != null) cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestRollingUpgradeRollback

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * "-rollingUpgrade rollback" via NameNode startup: /foo (created before the
 * PREPARE) must survive the rollback while /bar (created after) is removed,
 * and NN storage must retain the expected image/edits layout before and
 * after the rollback.
 * NOTE(review): checkNNStorage(storage, 3, 7) encodes expected txids --
 * presumably the rollback image plus the in-progress segment; confirm
 * against checkNNStorage's contract.
 */
@Test public void testRollbackCommand() throws Exception { final Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; final Path foo=new Path("/foo"); final Path bar=new Path("/bar"); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final DFSAdmin dfsadmin=new DFSAdmin(conf); dfs.mkdirs(foo); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"})); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); dfs.mkdirs(bar); NNStorage storage=cluster.getNamesystem().getFSImage().getStorage(); checkNNStorage(storage,3,-1); } finally { if (cluster != null) { cluster.shutdown(); } } NameNode nn=null; try { nn=NameNode.createNameNode(new String[]{"-rollingUpgrade","rollback"},conf); INode fooNode=nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString()); Assert.assertNotNull(fooNode); INode barNode=nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString()); Assert.assertNull(barNode); NNStorage storage=nn.getNamesystem().getFSImage().getStorage(); checkNNStorage(storage,3,7); } finally { if (nn != null) { nn.stop(); nn.join(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rolling-upgrade rollback with a quorum journal: prepare an upgrade, make
 * a post-upgrade change (/bar), restart the NameNode with
 * "-rollingUpgrade rollback", and verify the pre-upgrade namespace is
 * restored (/foo present, /bar gone) and that every JournalNode's storage
 * has the expected segment layout.
 *
 * Fix vs. original: the verification loop iterated i over all journal
 * nodes but always inspected node 0 (mjc.getCurrentDir(0, JOURNAL_ID));
 * it now checks each journal node's current directory.
 */
@Test
public void testRollbackWithQJM() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniJournalCluster mjc = null;
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");
  try {
    mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        mjc.getQuorumJournalURI(JOURNAL_ID).toString());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // PREPARE requires safe mode; /bar is created after the upgrade marker
    // so the rollback is expected to discard it.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0, dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    dfs.mkdirs(bar);
    dfs.close();
    // Roll back: the pre-upgrade namespace must be restored.
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    dfs = cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    // Every JournalNode should have discarded the post-upgrade edits.
    for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
      File dir = mjc.getCurrentDir(i, JOURNAL_ID);
      checkJNStorage(dir, 4, 7);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestSafeMode

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): asserts the exact safemode status string produced by the NN;
// any change to that message text will break this test. The expected counts
// (15 blocks, threshold 0.9990) follow from the 15-block file created above
// and the DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY = 1/15 set before the
// NameNode restart. The final while-loop polls until under-replicated-block
// accounting converges to (15 - safe).
/** * Test that the NN initializes its under-replicated blocks queue * before it is ready to exit safemode (HDFS-1476) */ @Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception { LOG.info("Starting testInitializeReplQueuesEarly"); BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(),false); cluster.startDataNodes(conf,2,true,StartupOption.REGULAR,null); cluster.waitActive(); LOG.info("Creating files"); DFSTestUtil.createFile(fs,TEST_PATH,15 * BLOCK_SIZE,(short)1,1L); LOG.info("Stopping all DataNodes"); List dnprops=Lists.newLinkedList(); dnprops.add(cluster.stopDataNode(0)); dnprops.add(cluster.stopDataNode(0)); dnprops.add(cluster.stopDataNode(0)); cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,1f / 15f); LOG.info("Restarting NameNode"); cluster.restartNameNode(); final NameNode nn=cluster.getNameNode(); String status=nn.getNamesystem().getSafemode(); assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. 
"+ "Safe mode will be turned off automatically once the thresholds "+ "have been reached.",status); assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed",NameNodeAdapter.safeModeInitializedReplQueues(nn)); LOG.info("Restarting one DataNode"); cluster.restartDataNode(dnprops.remove(0)); GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ return getLongCounter("StorageBlockReportOps",getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode(); } } ,10,10000); final int safe=NameNodeAdapter.getSafeModeSafeBlocks(nn); assertTrue("Expected first block report to make some blocks safe.",safe > 0); assertTrue("Did not expect first block report to make all blocks safe.",safe < 15); assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn)); BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager()); long underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks(); while (underReplicatedBlocks != (15 - safe)) { LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual="+ underReplicatedBlocks); Thread.sleep(100); BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager()); underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks(); } cluster.restartDataNodes(); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// NOTE(review): every metadata mutation (quota, perms, owner, replication,
// append, delete, rename, times, ACLs, xattrs) is expected to FAIL while in
// safe mode (runFsFun asserts the failure), while reads (readFile,
// getAclStatus, access-for-READ) must succeed; access-for-WRITE is rejected
// for a non-owner even in safe mode. The final assertFalse confirms
// SAFEMODE_LEAVE reports safe mode off.
/** * Run various fs operations while the NN is in safe mode, * assert that they are either allowed or fail as expected. */ @Test public void testOperationsWhileInSafeMode() throws IOException, InterruptedException { final Path file1=new Path("/file1"); assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)); DFSTestUtil.createFile(fs,file1,1024,(short)1,0); assertTrue("Could not enter SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER)); runFsFun("Set quota while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { ((DistributedFileSystem)fs).setQuota(file1,1,1); } } ); runFsFun("Set perm while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setPermission(file1,FsPermission.getDefault()); } } ); runFsFun("Set owner while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setOwner(file1,"user","group"); } } ); runFsFun("Set repl while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setReplication(file1,(short)1); } } ); runFsFun("Append file while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { DFSTestUtil.appendFile(fs,file1,"new bytes"); } } ); runFsFun("Delete file while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.delete(file1,false); } } ); runFsFun("Rename file while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.rename(file1,new Path("file2")); } } ); runFsFun("Set time while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setTimes(file1,0,0); } } ); runFsFun("modifyAclEntries while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.modifyAclEntries(file1,Lists.newArrayList()); } } ); runFsFun("removeAclEntries while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { 
fs.removeAclEntries(file1,Lists.newArrayList()); } } ); runFsFun("removeDefaultAcl while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.removeDefaultAcl(file1); } } ); runFsFun("removeAcl while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.removeAcl(file1); } } ); runFsFun("setAcl while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setAcl(file1,Lists.newArrayList()); } } ); runFsFun("setXAttr while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.setXAttr(file1,"user.a1",null); } } ); runFsFun("removeXAttr while in SM",new FSRun(){ @Override public void run( FileSystem fs) throws IOException { fs.removeXAttr(file1,"user.a1"); } } ); try { DFSTestUtil.readFile(fs,file1); } catch ( IOException ioe) { fail("Set times failed while in SM"); } try { fs.getAclStatus(file1); } catch ( IOException ioe) { fail("getAclStatus failed while in SM"); } UserGroupInformation ugiX=UserGroupInformation.createRemoteUser("userX"); FileSystem myfs=ugiX.doAs(new PrivilegedExceptionAction(){ @Override public FileSystem run() throws IOException { return FileSystem.get(conf); } } ); myfs.access(file1,FsAction.READ); try { myfs.access(file1,FsAction.WRITE); fail("The access call should have failed."); } catch ( AccessControlException e) { } assertFalse("Could not leave SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the 1-second Thread.sleep before the final assert is a fixed
// wait, not a condition poll -- a slow datanode registration could make this
// flaky. An empty safemode string means safe mode is off.
/** * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min * is set to a number greater than the number of live datanodes. */ @Test public void testDatanodeThreshold() throws IOException { cluster.shutdown(); Configuration conf=cluster.getConfiguration(0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,1); cluster.restartNameNode(); fs=cluster.getFileSystem(); String tipMsg=cluster.getNamesystem().getSafemode(); assertTrue("Safemode tip message doesn't look right: " + tipMsg,tipMsg.contains("The number of live datanodes 0 needs an additional " + "1 live datanodes to reach the minimum number 1.\n" + "Safe mode will be turned off automatically")); cluster.startDataNodes(conf,1,true,null,null); try { Thread.sleep(1000); } catch ( InterruptedException ignored) { } assertEquals("",cluster.getNamesystem().getSafemode()); }

Class: org.apache.hadoop.hdfs.TestSeekBug

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Seeking to a negative offset on an FSDataInputStream must raise an
 * IOException (declared via expected = IOException.class); a valid in-range
 * seek is performed first as a sanity check.
 *
 * Fix vs. original: the input stream opened on the file was never closed;
 * it is now closed in a nested finally so the expected exception from
 * seek(-73) still propagates while the stream is released.
 */
@Test(expected = IOException.class)
public void testNegativeSeek() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs, seekFile, ONEMB, ONEMB,
        fs.getDefaultBlockSize(seekFile), fs.getDefaultReplication(seekFile), seed);
    FSDataInputStream stream = fs.open(seekFile);
    try {
      // Sanity: a valid seek works before the negative one is attempted.
      stream.seek(65536);
      assertEquals(65536, stream.getPos());
      stream.seek(-73); // must throw IOException
    } finally {
      stream.close();
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Seeking past the end of the file on an FSDataInputStream must raise an
 * IOException (declared via expected = IOException.class); a valid in-range
 * seek is performed first as a sanity check.
 *
 * Fix vs. original: the input stream opened on the file was never closed;
 * it is now closed in a nested finally so the expected exception from the
 * past-EOF seek still propagates while the stream is released.
 */
@Test(expected = IOException.class)
public void testSeekPastFileSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs, seekFile, ONEMB, ONEMB,
        fs.getDefaultBlockSize(seekFile), fs.getDefaultReplication(seekFile), seed);
    FSDataInputStream stream = fs.open(seekFile);
    try {
      // Sanity: a valid seek works before the out-of-range one.
      stream.seek(65536);
      assertEquals(65536, stream.getPos());
      stream.seek(ONEMB + ONEMB + ONEMB); // past EOF -- must throw IOException
    } finally {
      stream.close();
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestSetTimes

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): verifies close() updates the file's modification time --
// mtime is sampled after create/write and again after close, and both must
// be non-zero and differ. On IOException a datanode report is printed before
// rethrowing to aid diagnosis.
/** * Tests mod time change at close in DFS. */ @Test public void testTimesAtClose() throws IOException { Configuration conf=new HdfsConfiguration(); final int MAX_IDLE_TIME=2000; int replicas=1; conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ",numDatanodes,info.length); FileSystem fileSys=cluster.getFileSystem(); assertTrue(fileSys instanceof DistributedFileSystem); try { Path file1=new Path("/simple.dat"); FSDataOutputStream stm=writeFile(fileSys,file1,replicas); System.out.println("Created and wrote file simple.dat"); FileStatus statBeforeClose=fileSys.getFileStatus(file1); long mtimeBeforeClose=statBeforeClose.getModificationTime(); String mdateBeforeClose=dateForm.format(new Date(mtimeBeforeClose)); System.out.println("mtime on " + file1 + " before close is "+ mdateBeforeClose+ " ("+ mtimeBeforeClose+ ")"); assertTrue(mtimeBeforeClose != 0); stm.close(); System.out.println("Closed file."); FileStatus statAfterClose=fileSys.getFileStatus(file1); long mtimeAfterClose=statAfterClose.getModificationTime(); String mdateAfterClose=dateForm.format(new Date(mtimeAfterClose)); System.out.println("mtime on " + file1 + " after close is "+ mdateAfterClose+ " ("+ mtimeAfterClose+ ")"); assertTrue(mtimeAfterClose != 0); assertTrue(mtimeBeforeClose != mtimeAfterClose); cleanupFile(fileSys,file1); } catch ( IOException e) { info=client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { 
fileSys.close(); cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): covers setTimes semantics -- directory atime starts at 0,
// setting atime alone (mtime = -1) leaves mtime untouched and vice versa,
// explicit times on a directory stick, setTimes on a missing path throws
// FileNotFoundException with the expected message, and all set times survive
// a NameNode restart on the same port (nnport) with format(false).
/** * Tests mod & access time in DFS. */ @Test public void testTimes() throws IOException { Configuration conf=new HdfsConfiguration(); final int MAX_IDLE_TIME=2000; conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); final int nnport=cluster.getNameNodePort(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ",numDatanodes,info.length); FileSystem fileSys=cluster.getFileSystem(); int replicas=1; assertTrue(fileSys instanceof DistributedFileSystem); try { System.out.println("Creating testdir1 and testdir1/test1.dat."); Path dir1=new Path("testdir1"); Path file1=new Path(dir1,"test1.dat"); FSDataOutputStream stm=writeFile(fileSys,file1,replicas); FileStatus stat=fileSys.getFileStatus(file1); long atimeBeforeClose=stat.getAccessTime(); String adate=dateForm.format(new Date(atimeBeforeClose)); System.out.println("atime on " + file1 + " before close is "+ adate+ " ("+ atimeBeforeClose+ ")"); assertTrue(atimeBeforeClose != 0); stm.close(); stat=fileSys.getFileStatus(file1); long atime1=stat.getAccessTime(); long mtime1=stat.getModificationTime(); adate=dateForm.format(new Date(atime1)); String mdate=dateForm.format(new Date(mtime1)); System.out.println("atime on " + file1 + " is "+ adate+ " ("+ atime1+ ")"); System.out.println("mtime on " + file1 + " is "+ mdate+ " ("+ mtime1+ ")"); assertTrue(atime1 != 0); stat=fileSys.getFileStatus(dir1); long mdir1=stat.getAccessTime(); assertTrue(mdir1 == 0); long atime2=atime1 - (24L * 3600L * 1000L); fileSys.setTimes(file1,-1,atime2); stat=fileSys.getFileStatus(file1); long 
atime3=stat.getAccessTime(); String adate3=dateForm.format(new Date(atime3)); System.out.println("new atime on " + file1 + " is "+ adate3+ " ("+ atime3+ ")"); assertTrue(atime2 == atime3); assertTrue(mtime1 == stat.getModificationTime()); long mtime2=mtime1 - (3600L * 1000L); fileSys.setTimes(file1,mtime2,-1); stat=fileSys.getFileStatus(file1); long mtime3=stat.getModificationTime(); String mdate3=dateForm.format(new Date(mtime3)); System.out.println("new mtime on " + file1 + " is "+ mdate3+ " ("+ mtime3+ ")"); assertTrue(atime2 == stat.getAccessTime()); assertTrue(mtime2 == mtime3); long mtime4=Time.now() - (3600L * 1000L); long atime4=Time.now(); fileSys.setTimes(dir1,mtime4,atime4); stat=fileSys.getFileStatus(dir1); assertTrue("Not matching the modification times",mtime4 == stat.getModificationTime()); assertTrue("Not matching the access times",atime4 == stat.getAccessTime()); Path nonExistingDir=new Path(dir1,"/nonExistingDir/"); try { fileSys.setTimes(nonExistingDir,mtime4,atime4); fail("Expecting FileNotFoundException"); } catch ( FileNotFoundException e) { assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist.")); } cluster.shutdown(); try { Thread.sleep(2 * MAX_IDLE_TIME); } catch ( InterruptedException e) { } cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build(); cluster.waitActive(); fileSys=cluster.getFileSystem(); System.out.println("Verifying times after cluster restart"); stat=fileSys.getFileStatus(file1); assertTrue(atime2 == stat.getAccessTime()); assertTrue(mtime3 == stat.getModificationTime()); cleanupFile(fileSys,file1); cleanupFile(fileSys,dir1); } catch ( IOException e) { info=client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { fileSys.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestDFSClientCache

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DFSClientCache eviction: repeated lookups for the same user return the
 * same live client; adding a second user to a capacity-2 cache closes the
 * first client and leaves a single entry in the underlying cache.
 */
@Test
public void testEviction() throws IOException {
  NfsConfiguration nfsConf = new NfsConfiguration();
  nfsConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
  final int MAX_CACHE_SIZE = 2;
  DFSClientCache dfsCache = new DFSClientCache(nfsConf, MAX_CACHE_SIZE);
  // First lookup populates the cache for user "test1".
  DFSClient firstClient = dfsCache.getDfsClient("test1");
  assertTrue(dfsCache.getDfsClient("test1").toString().contains("ugi=test1"));
  // Cache hit: same instance, still open.
  assertEquals(firstClient, dfsCache.getDfsClient("test1"));
  assertFalse(isDfsClientClose(firstClient));
  // A second user triggers eviction; the evicted client must be closed.
  dfsCache.getDfsClient("test2");
  assertTrue(isDfsClientClose(firstClient));
  assertEquals(MAX_CACHE_SIZE - 1, dfsCache.clientCache.size());
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestOffsetRange

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * OffsetRange.ReverseComparatorOnMin orders ranges by descending minimum
 * offset: equal ranges (and a range against itself) compare as 0, and a
 * range with a smaller min sorts after one with a larger min.
 */
@Test
public void testCompare() throws IOException {
  OffsetRange lowest = new OffsetRange(0, 1);
  OffsetRange middle = new OffsetRange(1, 3);
  OffsetRange middleTwin = new OffsetRange(1, 3);
  OffsetRange highest = new OffsetRange(3, 4);
  // Identical bounds -- and identity -- compare equal.
  assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(middle, middleTwin));
  assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(middle, middle));
  // Reverse ordering on min: the larger min compares as "less than".
  assertTrue(OffsetRange.ReverseComparatorOnMin.compare(middle, lowest) < 0);
  assertTrue(OffsetRange.ReverseComparatorOnMin.compare(middle, highest) > 0);
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestWrites

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * checkCommit driven from the read path (fromRead = true): an inactive
 * context yields COMMIT_INACTIVE_CTX (or COMMIT_INACTIVE_WITH_PENDING_WRITE
 * when writes are queued), offsets at or below the mocked flushed position
 * (fos.getPos() = 10) finish immediately, and offsets beyond it return
 * COMMIT_WAIT without queueing a pending commit. commitBeforeRead maps these
 * outcomes to NFS3_OK / NFS3ERR_IO / NFS3ERR_JUKEBOX respectively.
 * NOTE(review): assertion order matters -- the pending write (5,10) is only
 * removed near the end, which is what flips the final offset-0 commit from
 * JUKEBOX to FINISHED.
 */
@Test public void testCheckCommitFromRead() throws IOException { DFSClient dfsClient=Mockito.mock(DFSClient.class); Nfs3FileAttributes attr=new Nfs3FileAttributes(); HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long)0); NfsConfiguration config=new NfsConfiguration(); OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(config)); FileHandle h=new FileHandle(1); COMMIT_STATUS ret; WriteManager wm=new WriteManager(new IdUserGroup(config),config,false); assertTrue(wm.addOpenFileStream(h,ctx)); ctx.setActiveStatusForTest(false); Channel ch=Mockito.mock(Channel.class); ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX,ret); assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0)); ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null)); ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE,ret); assertEquals(Nfs3Status.NFS3ERR_IO,wm.commitBeforeRead(dfsClient,h,0)); ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long)10); COMMIT_STATUS status=ctx.checkCommitInternal(5,ch,1,attr,false); assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status); ret=ctx.checkCommit(dfsClient,5,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret); assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,5)); status=ctx.checkCommitInternal(10,ch,1,attr,true); assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); ret=ctx.checkCommit(dfsClient,10,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret); assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,10)); ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest(); assertTrue(commits.size() == 0); ret=ctx.checkCommit(dfsClient,11,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret); assertEquals(0,commits.size()); 
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,11)); ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret); assertEquals(0,commits.size()); assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,0)); ctx.getPendingWritesForTest().remove(new OffsetRange(5,10)); ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret); assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0)); }

Class: org.apache.hadoop.hdfs.protocol.TestLayoutVersion

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * NameNode layout features must support all prior common features, and the
 * first NN-specific feature (ROLLING_UPGRADE) must directly follow the last
 * common layout version.
 */
@Test
public void testNameNodeFeature() {
  final LayoutFeature first = NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
  final int firstLayoutVersion = first.getInfo().getLayoutVersion();
  assertTrue(NameNodeLayoutVersion.supports(
      LAST_NON_RESERVED_COMMON_FEATURE, firstLayoutVersion));
  // Layout versions decrease as features are added, hence the "- 1".
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, firstLayoutVersion);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DataNode layout features must support all prior common features, and the
 * first DN-specific feature (FIRST_LAYOUT) must directly follow the last
 * common layout version.
 */
@Test
public void testDataNodeFeature() {
  final LayoutFeature first = DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
  final int firstLayoutVersion = first.getInfo().getLayoutVersion();
  assertTrue(DataNodeLayoutVersion.supports(
      LAST_NON_RESERVED_COMMON_FEATURE, firstLayoutVersion));
  // Layout versions decrease as features are added, hence the "- 1".
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, firstLayoutVersion);
}

Class: org.apache.hadoop.hdfs.qjournal.TestNNWithQJM

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * When a second NameNode begins writing to the shared quorum journal, the
 * first writer is fenced: nn1's formatted storage is copied to nn2's dir,
 * nn2 starts from the copy (and must see the directory nn1 created), and a
 * subsequent write attempt by nn1 must abort with a "Could not sync enough
 * journals" RemoteException.
 *
 * Fix vs. original: the outer finally block was empty, leaking the fenced
 * MiniDFSCluster; it now shuts the cluster down.
 */
@Test(timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
  File nn1Dir = new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
  File nn2Dir = new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn1Dir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());
  // Format nn1's storage by starting and immediately stopping a cluster.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .manageNameDfsDirs(false)
      .checkExitOnShutdown(false)
      .build();
  cluster.shutdown();
  try {
    // Copy nn1's formatted image so nn2 can start from identical storage.
    FileUtil.fullyDelete(nn2Dir);
    FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
        new Path(nn2Dir.getAbsolutePath()), false, conf);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(false)
        .manageNameDfsDirs(false)
        .checkExitOnShutdown(false)
        .build();
    cluster.getFileSystem().mkdirs(TEST_PATH);
    // Start a second NN on the copied image writing to the same journal;
    // taking over the writer epoch fences the first NN.
    Configuration conf2 = new Configuration();
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn2Dir.getAbsolutePath());
    conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        mjc.getQuorumJournalURI("myjournal").toString());
    MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
        .numDataNodes(0)
        .format(false)
        .manageNameDfsDirs(false)
        .build();
    try {
      // nn2 must see the edits nn1 committed before being fenced.
      assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
    } finally {
      cluster2.shutdown();
    }
    // The fenced NN must no longer be able to commit edits.
    try {
      cluster.getFileSystem().mkdirs(new Path("/x"));
      fail("Did not abort trying to write to a fenced NN");
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains(
          "Could not sync enough journals to persistent storage", re);
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestEpochsAreUnique

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Single-threaded check that createNewUniqueEpoch() hands out strictly
 * increasing epoch numbers, both on healthy loggers and when loggers fail
 * randomly (FaultyLoggerFactory).
 */
@Test
public void testSingleThreaded() throws IOException {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  URI uri = cluster.getQuorumJournalURI(JID);
  QuorumJournalManager qjm = new QuorumJournalManager(conf, uri, FAKE_NSINFO);
  try {
    qjm.format(FAKE_NSINFO);
  } finally {
    qjm.close();
  }
  try {
    // With no failures, epochs are assigned sequentially: 1..5.
    for (int i = 0; i < 5; i++) {
      qjm = new QuorumJournalManager(conf, uri, FAKE_NSINFO);
      try {
        qjm.createNewUniqueEpoch();
        assertEquals(i + 1, qjm.getLoggerSetForTests().getEpoch());
      } finally {
        qjm.close();
      }
    }
    long prevEpoch = 5;
    // With injected faults, retry until an epoch is created; each success
    // must still produce an epoch greater than the previous one.
    for (int i = 0; i < 20; i++) {
      long newEpoch = -1;
      while (true) {
        qjm = new QuorumJournalManager(conf, uri, FAKE_NSINFO, new FaultyLoggerFactory());
        try {
          qjm.createNewUniqueEpoch();
          newEpoch = qjm.getLoggerSetForTests().getEpoch();
          break;
        } catch ( IOException ioe) {
          // Expected occasionally because of the injected faults: just retry.
        } finally {
          qjm.close();
        }
      }
      LOG.info("Created epoch " + newEpoch);
      assertTrue("New epoch " + newEpoch + " should be greater than previous "+ prevEpoch,
          newEpoch > prevEpoch);
      prevEpoch = newEpoch;
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestIPCLoggerChannel

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test that, if the remote node gets unsynchronized (eg some edits were
 * missed or the node rebooted), the client stops sending edits until
 * the next roll. Test for HDFS-3726.
 */
@Test
public void testStopSendingEditsWhenOutOfSync() throws Exception {
  // Make the first journal() RPC fail, which should mark the channel out of sync.
  Mockito.doThrow(new IOException("injected error"))
      .when(mockProxy).journal(Mockito.any(), Mockito.eq(1L), Mockito.eq(1L), Mockito.eq(1), Mockito.same(FAKE_DATA));
  try {
    ch.sendEdits(1L, 1L, 1, FAKE_DATA).get();
    fail("Injected JOOSE did not cause sendEdits() to throw");
  } catch ( ExecutionException ee) {
    GenericTestUtils.assertExceptionContains("injected", ee);
  }
  Mockito.verify(mockProxy).journal(Mockito.any(), Mockito.eq(1L), Mockito.eq(1L), Mockito.eq(1), Mockito.same(FAKE_DATA));
  assertTrue(ch.isOutOfSync());
  // While out of sync, further edits must be rejected locally...
  try {
    ch.sendEdits(1L, 2L, 1, FAKE_DATA).get();
    fail("sendEdits() should throw until next roll");
  } catch ( ExecutionException ee) {
    GenericTestUtils.assertExceptionContains("disabled until next roll", ee.getCause());
  }
  // ...so the proxy never sees txid 2, though heartbeats still flow.
  Mockito.verify(mockProxy, Mockito.never()).journal(Mockito.any(), Mockito.eq(1L), Mockito.eq(2L), Mockito.eq(1), Mockito.same(FAKE_DATA));
  Mockito.verify(mockProxy).heartbeat(Mockito.any());
  // Rolling to a new segment clears the out-of-sync state; edits flow again.
  ch.startLogSegment(3L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  assertFalse(ch.isOutOfSync());
  ch.sendEdits(3L, 3L, 1, FAKE_DATA).get();
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, once the queue eclipses the configure size limit,
 * calls to journal more data are rejected.
 */
@Test
public void testQueueLimiting() throws Exception {
  // Block the first journal RPC so everything behind it queues up.
  DelayAnswer delayer = new DelayAnswer(LOG);
  Mockito.doAnswer(delayer).when(mockProxy).journal(Mockito.any(), Mockito.eq(1L), Mockito.eq(1L), Mockito.eq(1), Mockito.same(FAKE_DATA));
  // Fill the queue exactly to its configured byte limit.
  int numToQueue = LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
  for (int i = 1; i <= numToQueue; i++) {
    ch.sendEdits(1L, (long) i, 1, FAKE_DATA);
  }
  assertEquals(LIMIT_QUEUE_SIZE_BYTES, ch.getQueuedEditsSize());
  // One more edit must be rejected with LoggerTooFarBehindException.
  try {
    ch.sendEdits(1L, numToQueue + 1, 1, FAKE_DATA).get(1, TimeUnit.SECONDS);
    fail("Did not fail to queue more calls after queue was full");
  } catch ( ExecutionException ee) {
    if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
      throw ee; // unexpected failure cause: rethrow to fail the test
    }
  }
  // Unblock the RPC and wait for the queue to drain completely.
  delayer.proceed();
  GenericTestUtils.waitFor(new Supplier() {
    @Override
    public Boolean get() {
      return ch.getQueuedEditsSize() == 0;
    }
  }, 10, 1000);
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestQuorumCall

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test QuorumCall response/success/exception accounting and waitFor()
 * semantics as each underlying future completes, including the timeout
 * path when more successes are demanded than can ever arrive.
 */
@Test(timeout=10000)
public void testQuorums() throws Exception {
  // Fix: the generic type arguments were missing (raw "Map>"), which does
  // not compile; restore Map<String, SettableFuture<String>>.
  Map<String, SettableFuture<String>> futures = ImmutableMap.of(
      "f1", SettableFuture.<String>create(),
      "f2", SettableFuture.<String>create(),
      "f3", SettableFuture.<String>create());
  QuorumCall<String, String> q = QuorumCall.create(futures);
  assertEquals(0, q.countResponses());
  futures.get("f1").set("first future");
  q.waitFor(1, 0, 0, 100000, "test"); // wait for 1 response
  q.waitFor(0, 1, 0, 100000, "test"); // wait for 1 success
  assertEquals(1, q.countResponses());
  futures.get("f2").setException(new Exception("error"));
  assertEquals(2, q.countResponses()); // an exception still counts as a response
  futures.get("f3").set("second future");
  q.waitFor(3, 0, 100, 100000, "test"); // wait for all 3 responses
  q.waitFor(0, 2, 100, 100000, "test"); // wait for the 2 successes
  assertEquals(3, q.countResponses());
  // Only the successful results appear, keyed and sorted by logger name.
  assertEquals("f1=first future,f3=second future",
      Joiner.on(",").withKeyValueSeparator("=").join(new TreeMap<String, String>(q.getResults())));
  try {
    q.waitFor(0, 4, 100, 10, "test");
    fail("Didn't time out waiting for more responses than came back");
  } catch (TimeoutException te) {
    // expected: only 3 futures exist, 4 successes can never arrive
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManager

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Per-test setup: start a mini journal cluster and a spying QJM, format the
 * journal, recover unfinalized segments, and verify the writer begins at
 * epoch 1.
 */
@Before
public void setup() throws Exception {
  conf = new Configuration();
  // Retries disabled — presumably to keep the fault-injection tests fast;
  // TODO(review): confirm against the original test class.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  cluster = new MiniJournalCluster.Builder(conf).build();
  qjm = createSpyingQJM();
  // Keep the per-node logger spies so tests can inject faults on them.
  spies = qjm.getLoggerSetForTests().getLoggersForTests();
  qjm.format(QJMTestUtil.FAKE_NSINFO);
  qjm.recoverUnfinalizedSegments();
  assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Regression test for HDFS-3891: selectInputStreams should throw
 * an exception when a majority of journalnodes have crashed.
 */
@Test
public void testSelectInputStreamsMajorityDown() throws Exception {
  // Take the entire journal cluster down, then ask the QJM for streams.
  cluster.shutdown();
  List candidateStreams = Lists.newArrayList();
  try {
    qjm.selectInputStreams(candidateStreams, 0, false);
    fail("Did not throw IOE");
  } catch (QuorumException quorumFailure) {
    // The quorum failure must surface, and no streams may have been handed out.
    GenericTestUtils.assertExceptionContains("Got too many exceptions", quorumFailure);
    assertTrue(candidateStreams.isEmpty());
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test a reader QJM alongside an active writer: with inProgressOk=false the
 * reader sees only finalized segments, and re-selecting after each
 * finalization picks up the newly finalized segment.
 */
@Test
public void testReaderWhileAnotherWrites() throws Exception {
  QuorumJournalManager readerQjm = closeLater(createSpyingQJM());
  // Fix: the generic type argument was missing; with a raw List,
  // streams.get(i) returns Object and the EditLogInputStream member
  // accesses below do not compile.
  List<EditLogInputStream> streams = Lists.newArrayList();
  readerQjm.selectInputStreams(streams, 0, false);
  assertEquals(0, streams.size()); // nothing written yet
  writeSegment(cluster, qjm, 1, 3, true);
  readerQjm.selectInputStreams(streams, 0, false);
  try {
    // The finalized segment 1-3 is visible and fully readable.
    assertEquals(1, streams.size());
    EditLogInputStream stream = streams.get(0);
    assertEquals(1, stream.getFirstTxId());
    assertEquals(3, stream.getLastTxId());
    verifyEdits(streams, 1, 3);
    assertNull(stream.readOp()); // no ops past txid 3
  } finally {
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    streams.clear();
  }
  // An in-progress segment starting at 4 is NOT returned (inProgressOk=false).
  writeSegment(cluster, qjm, 4, 3, false);
  readerQjm.selectInputStreams(streams, 0, false);
  try {
    assertEquals(1, streams.size());
    EditLogInputStream stream = streams.get(0);
    assertEquals(1, stream.getFirstTxId());
    assertEquals(3, stream.getLastTxId());
    verifyEdits(streams, 1, 3);
  } finally {
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    streams.clear();
  }
  // Once segment 4-6 is finalized, the reader sees both segments.
  qjm.finalizeLogSegment(4, 6);
  readerQjm.selectInputStreams(streams, 0, false);
  try {
    assertEquals(2, streams.size());
    assertEquals(4, streams.get(1).getFirstTxId());
    assertEquals(6, streams.get(1).getLastTxId());
    verifyEdits(streams, 1, 6);
  } finally {
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    streams.clear();
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test the case where one of the loggers misses a finalizeLogSegment()
 * call, and then misses the next startLogSegment() call before coming
 * back to life.
 * Previously, this caused it to keep on writing to the old log segment,
 * such that one logger had eg edits_1-10 while the others had edits_1-5 and
 * edits_6-10. This caused recovery to fail in certain cases.
 */
@Test
public void testMissFinalizeAndNextStart() throws Exception {
  // Logger 0 misses both the finalize of segment 1-3 and the start of segment 4.
  futureThrows(new IOException("injected")).when(spies.get(0))
      .finalizeLogSegment(Mockito.eq(1L), Mockito.eq(3L));
  futureThrows(new IOException("injected")).when(spies.get(0))
      .startLogSegment(Mockito.eq(4L), Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
  // Logger 1 fails at txn 4, so the writer cannot reach a quorum on segment 4.
  failLoggerAtTxn(spies.get(1), 4L);
  writeSegment(cluster, qjm, 1, 3, true);
  EditLogOutputStream stm = qjm.startLogSegment(4, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    writeTxns(stm, 4, 1);
    fail("Did not fail to write");
  } catch ( QuorumException qe) {
    GenericTestUtils.assertExceptionContains("Writer out of sync", qe);
  } finally {
    stm.abort();
    qjm.close();
  }
  // Stop node 2 (the only node with no injected fault) and verify recovery
  // still lands on txn 3 with the remaining nodes.
  cluster.getJournalNode(2).stopAndJoin(0);
  qjm = createSpyingQJM();
  long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
  assertEquals(3L, recovered);
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournal

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test epoch handling in Journal: promises are monotonic, re-promising the
 * same epoch is rejected, and calls stamped with a stale epoch are refused.
 */
@Test(timeout=10000)
public void testEpochHandling() throws Exception {
  assertEquals(0, journal.getLastPromisedEpoch());
  NewEpochResponseProto newEpoch = journal.newEpoch(FAKE_NSINFO, 1);
  // No segment has been written, so the response carries no segment txid.
  assertFalse(newEpoch.hasLastSegmentTxId());
  assertEquals(1, journal.getLastPromisedEpoch());
  // Fix: the original discarded this call's response and re-asserted the
  // stale epoch-1 proto; capture the epoch-3 response so the assertion
  // actually checks the new promise.
  newEpoch = journal.newEpoch(FAKE_NSINFO, 3);
  assertFalse(newEpoch.hasLastSegmentTxId());
  assertEquals(3, journal.getLastPromisedEpoch());
  try {
    journal.newEpoch(FAKE_NSINFO, 3);
    fail("Should have failed to promise same epoch twice");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Proposed epoch 3 <= last promise 3", ioe);
  }
  try {
    // A startLogSegment stamped with epoch 1 must now be rejected.
    journal.startLogSegment(makeRI(1), 12345L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Should have rejected call from prior epoch");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3", ioe);
  }
  try {
    // Likewise for journal() itself.
    journal.journal(makeRI(1), 12345L, 100L, 0, new byte[0]);
    fail("Should have rejected call from prior epoch");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3", ioe);
  }
}

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * Test that the journal storage directory is protected by a lock file: a
 * second Journal on the same directory must fail until the first is closed.
 */
@Test(timeout=10000)
public void testJournalLocking() throws Exception {
  // Skip on filesystems where storage-dir locking isn't supported.
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  // The lock file exists while the journal holds the directory.
  GenericTestUtils.assertExists(lockFile);
  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR, mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot lock storage", ioe);
  }
  journal.close();
  // After close the lock is released and a new Journal can take over.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that formatting the journal resets its cached epoch state: both the
 * last promised and last writer epoch drop back to 0, while the journal
 * remains formatted.
 */
@Test(timeout=10000)
public void testFormatResetsCachedValues() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 12345L);
  journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertEquals(12345L, journal.getLastPromisedEpoch());
  assertEquals(12345L, journal.getLastWriterEpoch());
  assertTrue(journal.isFormatted());
  // Close before re-formatting with a different namespace.
  journal.close();
  journal.format(FAKE_NSINFO_2);
  // Epoch state is wiped by the format...
  assertEquals(0, journal.getLastPromisedEpoch());
  assertEquals(0, journal.getLastWriterEpoch());
  // ...but the journal is (re)formatted.
  assertTrue(journal.isFormatted());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test whether JNs can correctly handle editlog that cannot be decoded.
 */
@Test
public void testScanEditLog() throws Exception {
  // Start a segment at an older layout version so the JN must scan rather
  // than decode the (garbage) transactions.
  journal.startLogSegment(makeRI(1), 1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
  final int numTxns = 5;
  // Fix: the txn count was hard-coded as 5 here; use numTxns so the two
  // values cannot drift apart.
  byte[] ops = QJMTestUtil.createGabageTxns(1, numTxns);
  journal.journal(makeRI(2), 1, 1, numTxns, ops);
  // While in progress, the scanned segment still reports the right txid range.
  SegmentStateProto segmentState = journal.getSegmentInfo(1);
  assertTrue(segmentState.getIsInProgress());
  // Consistency fix: use the statically-imported asserts like the rest of
  // this class instead of mixing in Assert.-qualified calls.
  assertEquals(numTxns, segmentState.getEndTxId());
  assertEquals(1, segmentState.getStartTxId());
  // Finalize and re-check: same range, no longer in progress.
  journal.finalizeLogSegment(makeRI(3), 1, numTxns);
  segmentState = journal.getSegmentInfo(1);
  assertFalse(segmentState.getIsInProgress());
  assertEquals(numTxns, segmentState.getEndTxId());
  assertEquals(1, segmentState.getStartTxId());
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournalNode

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the JournalNode performs correctly as a Paxos
 * Acceptor process.
 */
@Test(timeout=100000)
public void testAcceptRecoveryBehavior() throws Exception {
  // Paxos operations require an established epoch.
  try {
    ch.prepareRecovery(1L).get();
    fail("Did not throw IllegalState when trying to run paxos without an epoch");
  } catch (ExecutionException ise) {
    GenericTestUtils.assertExceptionContains("bad epoch", ise);
  }
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  // Prepare before anything is written: nothing accepted, no segment state.
  PrepareRecoveryResponseProto prep = ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertFalse(prep.hasSegmentState());
  // Write one txn; a new prepare now reports segment state.
  ch.startLogSegment(1L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1L, 1, QJMTestUtil.createTxnData(1, 1)).get();
  prep = ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertTrue(prep.hasSegmentState());
  ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
  // Fix: the original dropped the future returned by newEpoch(2), racing
  // with the following prepareRecovery(); wait for it like every other call.
  ch.newEpoch(2).get();
  ch.setEpoch(2);
  // After the accept, the prior acceptance (epoch 1) is reported.
  prep = ch.prepareRecovery(1L).get();
  assertEquals(1L, prep.getAcceptedInEpoch());
  assertEquals(1L, prep.getSegmentState().getEndTxId());
  // Requests stamped with the stale epoch 1 must now be rejected.
  ch.setEpoch(1);
  try {
    ch.prepareRecovery(1L).get();
    fail("prepare from earlier epoch not rejected");
  } catch (ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2", ioe);
  }
  try {
    ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
    fail("accept from earlier epoch not rejected");
  } catch (ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2", ioe);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the JournalNode's HTTP server: JMX is served at /jmx, a finalized
 * segment can be fetched via /getJournal, and a bogus segment txid yields
 * an HTTP 404.
 */
@Test(timeout=100000)
public void testHttpServer() throws Exception {
  String urlRoot = jn.getHttpServerURI();
  // The JMX page should include the JournalNode's JvmMetrics bean.
  String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
  assertTrue("Bad contents: " + pageContents,
      pageContents.contains("Hadoop:service=JournalNode,name=JvmMetrics"));
  // Write and finalize a small segment (txids 1..3) through the IPC channel.
  byte[] EDITS_DATA = QJMTestUtil.createTxnData(1, 3);
  IPCLoggerChannel ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 3, EDITS_DATA).get();
  ch.finalizeLogSegment(1, 3).get();
  // Fetch the segment over HTTP: expect the layout version bytes, four zero
  // bytes, then the edits data.
  byte[] retrievedViaHttp = DFSTestUtil.urlGetBytes(new URL(urlRoot + "/getJournal?segmentTxId=1&jid=" + journalId));
  byte[] expected = Bytes.concat(
      Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),
      (new byte[]{0, 0, 0, 0}),
      EDITS_DATA);
  assertArrayEquals(expected, retrievedViaHttp);
  // A request for a segment that doesn't exist should 404.
  URL badUrl = new URL(urlRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
  HttpURLConnection connection = (HttpURLConnection) badUrl.openConnection();
  try {
    assertEquals(404, connection.getResponseCode());
  } finally {
    connection.disconnect();
  }
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournalNodeMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJournalNodeMXBean() throws Exception { MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo"); String journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus"); assertEquals(jn.getJournalsStatus(),journalStatus); assertFalse(journalStatus.contains(NAMESERVICE)); final NamespaceInfo FAKE_NSINFO=new NamespaceInfo(12345,"mycluster","my-bp",0L); jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO); journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus"); assertEquals(jn.getJournalsStatus(),journalStatus); Map> jMap=new HashMap>(); Map infoMap=new HashMap(); infoMap.put("Formatted","true"); jMap.put(NAMESERVICE,infoMap); assertEquals(JSON.toString(jMap),journalStatus); jCluster=new MiniJournalCluster.Builder(new Configuration()).format(false).numJournalNodes(NUM_JN).build(); jn=jCluster.getJournalNode(0); journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus"); assertEquals(jn.getJournalsStatus(),journalStatus); jMap=new HashMap>(); infoMap=new HashMap(); infoMap.put("Formatted","true"); jMap.put(NAMESERVICE,infoMap); assertEquals(JSON.toString(jMap),journalStatus); }

Class: org.apache.hadoop.hdfs.security.TestDelegationToken

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test obtaining delegation tokens through the WebHDFS API: exactly one
 * token is issued and stored in the credentials, and a second request
 * with the same credentials issues none.
 */
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting("JobTracker", new String[]{"user"});
  // Fix: the action's generic type argument was missing; a raw
  // PrivilegedExceptionAction makes doAs() return Object, which does not
  // compile when assigned to WebHdfsFileSystem.
  final WebHdfsFileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem) FileSystem.get(new URI(uri), config);
    }
  });
  {
    Credentials creds = new Credentials();
    final Token tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    // The returned token must be the very instance stored in the credentials.
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    // Asking again with the same credentials should add nothing new.
    final Token tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test renew/cancel of a delegation token under doAs: both the long
 * (kerberized) principal and its matching short name renew successfully,
 * and the long principal can cancel.
 */
@Test
public void testDelegationTokenWithDoAs() throws Exception {
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Credentials creds = new Credentials();
  final Token tokens[] = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  @SuppressWarnings("unchecked")
  final Token token = (Token) tokens[0];
  final UserGroupInformation longUgi = UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
  final UserGroupInformation shortUgi = UserGroupInformation.createRemoteUser("JobTracker");
  // Renew as the long principal; any exception is converted to a test failure.
  longUgi.doAs(new PrivilegedExceptionAction() {
    @Override
    public Object run() throws IOException {
      try {
        token.renew(config);
      } catch ( Exception e) {
        Assert.fail("Could not renew delegation token for user " + longUgi);
      }
      return null;
    }
  });
  // Renew as the short name; an exception here propagates and fails the test.
  shortUgi.doAs(new PrivilegedExceptionAction() {
    @Override
    public Object run() throws Exception {
      token.renew(config);
      return null;
    }
  });
  // Cancel as the long principal.
  longUgi.doAs(new PrivilegedExceptionAction() {
    @Override
    public Object run() throws IOException {
      try {
        token.cancel(config);
      } catch ( Exception e) {
        Assert.fail("Could not cancel delegation token for user " + longUgi);
      }
      return null;
    }
  });
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercise the delegation token secret manager lifecycle: renewal is
 * restricted to the designated renewer, the token expires after its
 * lifetime, and a token past its max lifetime cannot be renewed.
 */
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token token = generateDelegationToken("SomeUser", "JobTracker");
  // Only the designated renewer ("JobTracker") may renew.
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch ( AccessControlException ace) {
    // expected: wrong renewer is rejected
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  // NOTE(review): relies on a token lifetime configured elsewhere being
  // shorter than this sleep — confirm against the test's setup.
  Thread.sleep(6000);
  try {
    dtSecretManager.retrievePassword(identifier);
    Assert.fail("Token should have expired");
  } catch ( InvalidToken e) {
    // expected: token expired
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch ( InvalidToken it) {
    // expected: past max lifetime, renewal must fail
  }
}

Class: org.apache.hadoop.hdfs.security.token.block.TestBlockToken

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test
public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  // Fd counting needs the fd directory; skip on platforms without it.
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token token = sm.generateToken(block3, EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // An unrelated proxy that is never invoked; stopped only at the very end
  // (presumably so its teardown can't affect the fd counting — confirm).
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1), UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    // Hammer proxy creation for ~3 seconds, stopping each proxy after use.
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000, false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    // Allow some slack for fds unrelated to the proxies.
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}

Class: org.apache.hadoop.hdfs.server.balancer.TestBalancer

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test parse method in Balancer#Cli class with threshold value out of
 * boundaries (below the minimum and above the maximum).
 */
@Test(timeout=100000)
public void testBalancerCliParseWithThresholdOutOfBoundaries() {
  final String reason = "IllegalArgumentException is expected when threshold value is out of boundary.";
  // Each row: the out-of-range threshold argument and the message we expect.
  final String[][] badThresholds = {
      {"0", "Number out of range: threshold = 0.0"},
      {"101", "Number out of range: threshold = 101.0"},
  };
  for (String[] testCase : badThresholds) {
    try {
      Balancer.Cli.parse(new String[]{"-threshold", testCase[0]});
      fail(reason);
    } catch (IllegalArgumentException e) {
      assertEquals(testCase[1], e.getMessage());
    }
  }
}

Class: org.apache.hadoop.hdfs.server.balancer.TestBalancerWithHANameNodes

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 */
@Test(timeout=60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // the new empty node's capacity
  String newNodeRack = TestBalancer.RACK2; // the new node's rack
  String[] racks = new String[]{TestBalancer.RACK0, TestBalancer.RACK1};
  long[] capacities = new long[]{TestBalancer.CAPACITY, TestBalancer.CAPACITY};
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    // Short pause, presumably to let the active transition settle — confirm.
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf), ClientProtocol.class).getProxy();
    // Fill the cluster to 30% with one file replicated to every datanode.
    long totalCapacity = TestBalancer.sum(capacities);
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 1);
    // Add one new empty datanode, then run the balancer.
    cluster.startDataNodes(conf, 1, true, null, new String[]{newNodeRack}, new long[]{newNodeCapacity});
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    Collection namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    // The cluster should end up balanced within the default threshold.
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockInfo

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Adding a storage to a fresh BlockInfo must succeed and place that storage
 * at index 0 of the block's storage list.
 */
@Test
public void testAddStorage() throws Exception {
  final BlockInfo info = new BlockInfo(3);
  final DatanodeStorageInfo newStorage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  // addStorage reports whether the storage was newly added.
  Assert.assertTrue(info.addStorage(newStorage));
  Assert.assertEquals(newStorage, info.getStorageInfo(0));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test DatanodeStorageInfo's linked block list: building the list,
 * iterating it, and moveBlockToHead() for sequential, head, and random
 * elements.
 */
@Test
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");
  final int MAX_BLOCKS = 10;
  DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
  // Fix: the generic type arguments were missing; with raw ArrayLists,
  // get(i) returns Object and the Block/BlockInfo member accesses below
  // do not compile.
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
  int headIndex;
  int curIndex;
  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    blockInfoList.add(new BlockInfo(blockList.get(i), 3));
    dd.addBlock(blockInfoList.get(i));
    // The storage should sit at index 0 of each block's storage list.
    assertEquals("Find datanode should be 0", 0, blockInfoList.get(i).findStorageInfo(dd));
  }
  LOG.info("Checking list length...");
  assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
  Iterator<BlockInfo> it = dd.getBlockIterator();
  int len = 0;
  while (it.hasNext()) {
    it.next();
    len++;
  }
  assertEquals("There should be MAX_BLOCK blockInfo's", MAX_BLOCKS, len);
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    curIndex = blockInfoList.get(i).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.", blockInfoList.get(i), dd.getBlockListHeadForTesting());
  }
  LOG.info("Moving head to the head...");
  BlockInfo temp = dd.getBlockListHeadForTesting();
  curIndex = 0;
  headIndex = 0;
  dd.moveBlockToHead(temp, curIndex, headIndex);
  // Fix: typo in the assertion message ("shopuld" -> "should").
  assertEquals("Moving head to the head of the list should not change the list", temp, dd.getBlockListHeadForTesting());
  LOG.info("Checking elements of the list...");
  temp = dd.getBlockListHeadForTesting();
  assertNotNull("Head should not be null", temp);
  // After moving 0..MAX_BLOCKS-1 to the head in order, the list is reversed.
  int c = MAX_BLOCKS - 1;
  while (temp != null) {
    assertEquals("Expected element is not on the list", blockInfoList.get(c--), temp);
    temp = temp.getNext(0);
  }
  LOG.info("Moving random blocks to the head of the list...");
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    curIndex = blockInfoList.get(j).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.", blockInfoList.get(j), dd.getBlockListHeadForTesting());
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests datanode READ access control via block access tokens.
 *
 * <p>Verifies that a read succeeds with a valid token, fails once the token
 * expires, fails with a token generated for the wrong block or with
 * write-only access modes, and that already-open input streams keep working
 * (re-fetching tokens/locations as needed) across datanode and namenode
 * restarts.
 */
@Test
public void testRead() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

    // short token lifetime (1s) so tokens expire during the test
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    createFile(fs, fileToRead);

    // open three streams; the tokens they acquire are cached per stream
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1));
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2));
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3));

    // verify a DFSClient can be created (and closed) against the cluster
    DFSClient client = null;
    try {
      client = new DFSClient(
          new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
      if (client != null) client.close();
    }

    List<LocatedBlock> locatedBlocks =
        nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0); // first block of the file
    Token myToken = lblock.getBlockToken();
    // read with a fresh (unexpired) token must succeed
    assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
    tryRead(conf, lblock, true);

    // wait until the token (and the streams' cached tokens) expire
    while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
    // expired token: read must fail
    tryRead(conf, lblock, false);
    // fresh READ token for the right block: read must succeed
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, true);
    // token generated for a different block id: read must fail
    ExtendedBlock wrongBlock = new ExtendedBlock(
        lblock.getBlock().getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
    lblock.setBlockToken(sm.generateToken(wrongBlock,
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, false);
    // token without READ access mode: read must fail
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
            BlockTokenSecretManager.AccessMode.COPY,
            BlockTokenSecretManager.AccessMode.REPLACE)));
    tryRead(conf, lblock, false);

    // long lifetime for all tokens issued from here on
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);

    // the streams' cached tokens are expired by now, yet reads through the
    // streams must still succeed (tokens are re-fetched transparently)
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(in2.seekToNewSource(0));
    assertTrue(checkFile1(in2));
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));

    // restart datanodes (same ports) and stop the namenode; the streams'
    // tokens are now long-lived and reads must still succeed
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);

    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));

    // namenode bounce: open streams keep working while it is down again
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));

    // restart namenode and datanodes, then stop the namenode: with the
    // namenode unavailable the streams cannot refresh and reads fail
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertFalse(checkFile1(in1));
    assertFalse(checkFile2(in3));

    // with the namenode back, the streams recover and reads succeed
    cluster.restartNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));

    // restart datanodes on new ports; streams must re-resolve and succeed
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * testing that APPEND operation can handle token expiration when * re-establishing pipeline is needed */ @Test public void testAppend() throws Exception { MiniDFSCluster cluster=null; int numDataNodes=2; Configuration conf=getConf(numDataNodes); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); final NameNode nn=cluster.getNameNode(); final BlockManager bm=nn.getNamesystem().getBlockManager(); final BlockTokenSecretManager sm=bm.getBlockTokenSecretManager(); SecurityTestUtil.setBlockTokenLifetime(sm,1000L); Path fileToAppend=new Path(FILE_TO_APPEND); FileSystem fs=cluster.getFileSystem(); FSDataOutputStream stm=writeFile(fs,fileToAppend,(short)numDataNodes,BLOCK_SIZE); stm.write(rawData,0,1); stm.close(); stm=fs.append(fileToAppend); int mid=rawData.length - 1; stm.write(rawData,1,mid - 1); stm.hflush(); Token token=DFSTestUtil.getBlockToken(stm); while (!SecurityTestUtil.isBlockTokenExpired(token)) { try { Thread.sleep(10); } catch ( InterruptedException ignored) { } } cluster.stopDataNode(0); stm.write(rawData,mid,rawData.length - mid); stm.close(); FSDataInputStream in5=fs.open(fileToAppend); assertTrue(checkFile1(in5)); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * testing that WRITE operation can handle token expiration when * re-establishing pipeline is needed */ @Test public void testWrite() throws Exception { MiniDFSCluster cluster=null; int numDataNodes=2; Configuration conf=getConf(numDataNodes); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); final NameNode nn=cluster.getNameNode(); final BlockManager bm=nn.getNamesystem().getBlockManager(); final BlockTokenSecretManager sm=bm.getBlockTokenSecretManager(); SecurityTestUtil.setBlockTokenLifetime(sm,1000L); Path fileToWrite=new Path(FILE_TO_WRITE); FileSystem fs=cluster.getFileSystem(); FSDataOutputStream stm=writeFile(fs,fileToWrite,(short)numDataNodes,BLOCK_SIZE); int mid=rawData.length - 1; stm.write(rawData,0,mid); stm.hflush(); Token token=DFSTestUtil.getBlockToken(stm); while (!SecurityTestUtil.isBlockTokenExpired(token)) { try { Thread.sleep(10); } catch ( InterruptedException ignored) { } } cluster.stopDataNode(0); stm.write(rawData,mid,rawData.length - mid); stm.close(); FSDataInputStream in4=fs.open(fileToWrite); assertTrue(checkFile1(in4)); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlocksWithNotEnoughRacks

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test public void testCorruptBlockRereplicatedAcrossRacks() throws Exception { Configuration conf=getConf(); short REPLICATION_FACTOR=2; int fileLen=512; final Path filePath=new Path("/testFile"); String racks[]={"/rack1","/rack1","/rack2","/rack2"}; MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build(); final FSNamesystem ns=cluster.getNameNode().getNamesystem(); try { final FileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,filePath,fileLen,REPLICATION_FACTOR,1L); final String fileContent=DFSTestUtil.readFile(fs,filePath); ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath); DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0); int dnToCorrupt=DFSTestUtil.firstDnWithBlock(cluster,b); assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt,b)); cluster.restartDataNode(dnToCorrupt); DFSTestUtil.waitCorruptReplicas(fs,ns,filePath,b,1); DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0); for (int i=0; i < racks.length; i++) { String blockContent=cluster.readBlockOnDataNode(i,b); if (blockContent != null && i != dnToCorrupt) { assertEquals("Corrupt replica",fileContent,blockContent); } } } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestCachedBlocksList

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testSingleList(){ DatanodeDescriptor dn=new DatanodeDescriptor(new DatanodeID("127.0.0.1","localhost","abcd",5000,5001,5002,5003)); CachedBlock[] blocks=new CachedBlock[]{new CachedBlock(0L,(short)1,true),new CachedBlock(1L,(short)1,true),new CachedBlock(2L,(short)1,true)}; Assert.assertTrue("expected pending cached list to start off empty.",!dn.getPendingCached().iterator().hasNext()); Assert.assertTrue("expected cached list to start off empty.",!dn.getCached().iterator().hasNext()); Assert.assertTrue("expected pending uncached list to start off empty.",!dn.getPendingUncached().iterator().hasNext()); Assert.assertTrue(dn.getCached().add(blocks[0])); Assert.assertTrue("expected pending cached list to still be empty.",!dn.getPendingCached().iterator().hasNext()); Assert.assertEquals("failed to insert blocks[0]",blocks[0],dn.getCached().iterator().next()); Assert.assertTrue("expected pending uncached list to still be empty.",!dn.getPendingUncached().iterator().hasNext()); Assert.assertTrue(dn.getCached().add(blocks[1])); Iterator iter=dn.getCached().iterator(); Assert.assertEquals(blocks[0],iter.next()); Assert.assertEquals(blocks[1],iter.next()); Assert.assertTrue(!iter.hasNext()); Assert.assertTrue(dn.getCached().addFirst(blocks[2])); iter=dn.getCached().iterator(); Assert.assertEquals(blocks[2],iter.next()); Assert.assertEquals(blocks[0],iter.next()); Assert.assertEquals(blocks[1],iter.next()); Assert.assertTrue(!iter.hasNext()); Assert.assertTrue(dn.getCached().remove(blocks[0])); iter=dn.getCached().iterator(); Assert.assertEquals(blocks[2],iter.next()); Assert.assertEquals(blocks[1],iter.next()); Assert.assertTrue(!iter.hasNext()); dn.getCached().clear(); Assert.assertTrue("expected cached list to be empty after clear.",!dn.getPendingCached().iterator().hasNext()); }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestCorruptReplicaInfo

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testCorruptReplicaInfo() throws IOException, InterruptedException { CorruptReplicasMap crm=new CorruptReplicasMap(); assertEquals("Number of corrupt blocks must initially be 0",0,crm.size()); assertNull("Param n cannot be less than 0",crm.getCorruptReplicaBlockIds(-1,null)); assertNull("Param n cannot be greater than 100",crm.getCorruptReplicaBlockIds(101,null)); long[] l=crm.getCorruptReplicaBlockIds(0,null); assertNotNull("n = 0 must return non-null",l); assertEquals("n = 0 must return an empty list",0,l.length); int NUM_BLOCK_IDS=140; List block_ids=new LinkedList(); for (int i=0; i < NUM_BLOCK_IDS; i++) { block_ids.add((long)i); } DatanodeDescriptor dn1=DFSTestUtil.getLocalDatanodeDescriptor(); DatanodeDescriptor dn2=DFSTestUtil.getLocalDatanodeDescriptor(); addToCorruptReplicasMap(crm,getBlock(0),dn1); assertEquals("Number of corrupt blocks not returning correctly",1,crm.size()); addToCorruptReplicasMap(crm,getBlock(1),dn1); assertEquals("Number of corrupt blocks not returning correctly",2,crm.size()); addToCorruptReplicasMap(crm,getBlock(1),dn2); assertEquals("Number of corrupt blocks not returning correctly",2,crm.size()); crm.removeFromCorruptReplicasMap(getBlock(1)); assertEquals("Number of corrupt blocks not returning correctly",1,crm.size()); crm.removeFromCorruptReplicasMap(getBlock(0)); assertEquals("Number of corrupt blocks not returning correctly",0,crm.size()); for ( Long block_id : block_ids) { addToCorruptReplicasMap(crm,getBlock(block_id),dn1); } assertEquals("Number of corrupt blocks not returning correctly",NUM_BLOCK_IDS,crm.size()); assertTrue("First five block ids not returned correctly ",Arrays.equals(new long[]{0,1,2,3,4},crm.getCorruptReplicaBlockIds(5,null))); LOG.info(crm.getCorruptReplicaBlockIds(10,7L)); LOG.info(block_ids.subList(7,18)); assertTrue("10 blocks after 7 not returned correctly ",Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},crm.getCorruptReplicaBlockIds(10,7L))); }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestDatanodeDescriptor

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testBlocksCounter() throws Exception { DatanodeDescriptor dd=BlockManagerTestUtil.getLocalDatanodeDescriptor(true); assertEquals(0,dd.numBlocks()); BlockInfo blk=new BlockInfo(new Block(1L),1); BlockInfo blk1=new BlockInfo(new Block(2L),2); DatanodeStorageInfo[] storages=dd.getStorageInfos(); assertTrue(storages.length > 0); final String storageID=storages[0].getStorageID(); assertTrue(storages[0].addBlock(blk)); assertEquals(1,dd.numBlocks()); assertFalse(dd.removeBlock(blk1)); assertEquals(1,dd.numBlocks()); assertFalse(storages[0].addBlock(blk)); assertEquals(1,dd.numBlocks()); assertTrue(storages[0].addBlock(blk1)); assertEquals(2,dd.numBlocks()); assertTrue(dd.removeBlock(blk)); assertEquals(1,dd.numBlocks()); assertTrue(dd.removeBlock(blk1)); assertEquals(0,dd.numBlocks()); }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestHost2NodesMap

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testRemove() throws Exception { DatanodeDescriptor nodeNotInMap=DFSTestUtil.getDatanodeDescriptor("3.3.3.3","/d1/r4"); assertFalse(map.remove(nodeNotInMap)); assertTrue(map.remove(dataNodes[0])); assertTrue(map.getDatanodeByHost("1.1.1.1.") == null); assertTrue(map.getDatanodeByHost("2.2.2.2") == dataNodes[1]); DatanodeDescriptor node=map.getDatanodeByHost("3.3.3.3"); assertTrue(node == dataNodes[2] || node == dataNodes[3]); assertNull(map.getDatanodeByHost("4.4.4.4")); assertTrue(map.remove(dataNodes[2])); assertNull(map.getDatanodeByHost("1.1.1.1")); assertEquals(map.getDatanodeByHost("2.2.2.2"),dataNodes[1]); assertEquals(map.getDatanodeByHost("3.3.3.3"),dataNodes[3]); assertTrue(map.remove(dataNodes[3])); assertNull(map.getDatanodeByHost("1.1.1.1")); assertEquals(map.getDatanodeByHost("2.2.2.2"),dataNodes[1]); assertNull(map.getDatanodeByHost("3.3.3.3")); assertFalse(map.remove(null)); assertTrue(map.remove(dataNodes[1])); assertFalse(map.remove(dataNodes[1])); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testGetDatanodeByHost() throws Exception { assertEquals(map.getDatanodeByHost("1.1.1.1"),dataNodes[0]); assertEquals(map.getDatanodeByHost("2.2.2.2"),dataNodes[1]); DatanodeDescriptor node=map.getDatanodeByHost("3.3.3.3"); assertTrue(node == dataNodes[2] || node == dataNodes[3]); assertNull(map.getDatanodeByHost("4.4.4.4")); }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestOverReplicatedBlocks

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that processOverReplicatedBlock can handle corrupt replicas fine.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * thus preventing the NN from deleting valid replicas while keeping
 * corrupt ones.
 */
@Test
public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // create a 3-replica file and corrupt the replica on datanode 0
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));

    // stop datanode 0 and delete its block-scanner log so the corrupt
    // replica is re-scanned (and reported) after restart; retry the delete
    // for up to a minute since the scanner may still hold the file
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    File scanLog = new File(MiniDFSCluster.getFinalizedDir(
        cluster.getInstanceStorageDir(0, 0),
        cluster.getNamesystem().getBlockPoolId()).getParent().toString()
        + "/../dncp_block_verification.log.prev");
    for (int i = 0; !scanLog.delete(); i++) {
      assertTrue("Could not delete log file in one minute", i < 60);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
    }
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    // datanode 2 holds the corrupt replica after the scan
    String blockPoolId = cluster.getNamesystem().getBlockPoolId();
    final DatanodeID corruptDataNode = DataNodeTestUtils.getDNRegistrationForBP(
        cluster.getDataNodes().get(2), blockPoolId);

    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    try {
      // under the namesystem write lock, mark every healthy datanode as
      // full so the over-replication logic cannot pick them for deletion
      namesystem.writeLock();
      synchronized (hm) {
        String corruptMachineName = corruptDataNode.getXferAddr();
        for (DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
            datanode.updateHeartbeat(
                BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
                0L, 0L, 0, 0);
          }
        }

        // lower replication to 1; the remaining live replica must be the
        // valid one, not the corrupt one
        NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short) 1);
        assertEquals(1, bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestPendingDataNodeMessages

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testQueues(){ DatanodeDescriptor fakeDN=DFSTestUtil.getLocalDatanodeDescriptor(); DatanodeStorage storage=new DatanodeStorage("STORAGE_ID"); DatanodeStorageInfo storageInfo=new DatanodeStorageInfo(fakeDN,storage); msgs.enqueueReportedBlock(storageInfo,block1Gs1,ReplicaState.FINALIZED); msgs.enqueueReportedBlock(storageInfo,block1Gs2,ReplicaState.FINALIZED); assertEquals(2,msgs.count()); assertNull(msgs.takeBlockQueue(block2Gs1)); assertEquals(2,msgs.count()); Queue q=msgs.takeBlockQueue(block1Gs2DifferentInstance); assertEquals("ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",Joiner.on(",").join(q)); assertEquals(0,msgs.count()); assertNull(msgs.takeBlockQueue(block1Gs1)); assertEquals(0,msgs.count()); }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests PendingReplicationBlocks: per-block replica counting through
 * increment/decrement, removal when the count reaches zero, and the
 * timeout path that moves long-pending blocks to getTimedOutBlocks().
 */
@Test
public void testPendingReplication() {
  PendingReplicationBlocks pendingReplications;
  pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
  pendingReplications.start();

  // add 10 blocks; block i is pending on i targets
  DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
  for (int i = 0; i < storages.length; i++) {
    Block block = new Block(i, i, 0);
    DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
    System.arraycopy(storages, 0, targets, 0, i);
    pendingReplications.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(targets));
  }
  assertEquals("Size of pendingReplications ", 10, pendingReplications.size());

  // remove one replica of block 8; 7 remain pending
  Block blk = new Block(8, 8, 0);
  pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor());
  assertEquals("pendingReplications.getNumReplicas ", 7,
      pendingReplications.getNumReplicas(blk));

  // remove the rest; the block itself drops out of the pending set
  for (int i = 0; i < 7; i++) {
    pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
  }
  assertTrue(pendingReplications.size() == 9);

  // re-adding the block with 8 fresh targets restores it
  pendingReplications.increment(blk, DatanodeStorageInfo.toDatanodeDescriptors(
      DFSTestUtil.createDatanodeStorageInfos(8)));
  assertTrue(pendingReplications.size() == 10);

  // verify per-block pending counts
  for (int i = 0; i < 10; i++) {
    Block block = new Block(i, i, 0);
    int numReplicas = pendingReplications.getNumReplicas(block);
    assertTrue(numReplicas == i);
  }

  // nothing has timed out yet
  assertTrue(pendingReplications.getTimedOutBlocks() == null);
  try {
    Thread.sleep(1000);
  } catch (Exception e) {
  }

  // add 5 more blocks a second later
  for (int i = 10; i < 15; i++) {
    Block block = new Block(i, i, 0);
    pendingReplications.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(
            DFSTestUtil.createDatanodeStorageInfos(i)));
  }
  assertTrue(pendingReplications.size() == 15);

  // wait (polling once a second) until every pending block times out.
  // NOTE(review): this loop is unbounded; if the timeout mechanism breaks
  // the test hangs rather than fails.
  int loop = 0;
  while (pendingReplications.size() > 0) {
    try {
      Thread.sleep(1000);
    } catch (Exception e) {
    }
    loop++;
  }
  System.out.println("Had to wait for " + loop + " seconds for the lot to timeout");

  // all 15 blocks should now be reported as timed out
  assertEquals("Size of pendingReplications ", 0, pendingReplications.size());
  Block[] timedOut = pendingReplications.getTimedOutBlocks();
  assertTrue(timedOut != null && timedOut.length == 15);
  for (int i = 0; i < timedOut.length; i++) {
    assertTrue(timedOut[i].getBlockId() < 15);
  }
  pendingReplications.stop();
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestRBWBlockInvalidation

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that when a block's replica is removed from the RBW folder on one of
 * the datanodes, the namenode asks to invalidate that corrupted block and
 * schedules replication of one more replica for the under-replicated block.
 */
@Test(timeout = 600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  // skipped on Windows (relies on deleting in-use RBW files)
  assumeTrue(!Path.WINDOWS);
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  // aggressive report/scan/heartbeat intervals so the test converges fast
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();

    // write (but do not close) a 2-replica file, so replicas are RBW
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();

    // an extra datanode so re-replication has somewhere to go
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // delete the replica's block and meta files from datanode 0's RBW dir
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());
    out.close();

    // poll until the namenode notices the lost replica
    int liveReplicas = 0;
    while (true) {
      if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
        LOG.info("Live Replicas after corruption: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be less than 2 replicas in the "
        + "liveReplicasMap", 1, liveReplicas);

    // poll until re-replication restores the second live replica
    while (true) {
      if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) > 1) {
        LOG.info("Live Replicas after Rereplication: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2, liveReplicas);

    // finally the corrupt replica must be invalidated and drop to zero
    while (true) {
      Thread.sleep(100);
      if (countReplicas(namesystem, blk).corruptReplicas() == 0) {
        LOG.info("Corrupt Replicas becomes 0");
        break;
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * In this testcase, client is is a node outside of file system. * So the 1st replica can be placed on any node. * the 2nd replica should be placed on a different rack, * the 3rd replica should be placed on the same rack as the 2nd replica, * @throws Exception */ @Test public void testChooseTarget5() throws Exception { DatanodeDescriptor writerDesc=DFSTestUtil.getDatanodeDescriptor("7.7.7.7","/d2/r4"); DatanodeStorageInfo[] targets; targets=chooseTarget(0,writerDesc); assertEquals(targets.length,0); targets=chooseTarget(1,writerDesc); assertEquals(targets.length,1); targets=chooseTarget(2,writerDesc); assertEquals(targets.length,2); assertFalse(isOnSameRack(targets[0],targets[1])); targets=chooseTarget(3,writerDesc); assertEquals(targets.length,3); assertTrue(isOnSameRack(targets[1],targets[2])); assertFalse(isOnSameRack(targets[0],targets[1])); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testChooseTargetWithStaleNodes() throws Exception { dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1); namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck(); assertTrue(namenode.getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite()); DatanodeStorageInfo[] targets; targets=chooseTarget(1); assertEquals(targets.length,1); assertEquals(storages[1],targets[0]); Set excludedNodes=new HashSet(); excludedNodes.add(dataNodes[1]); List chosenNodes=new ArrayList(); targets=chooseTarget(1,chosenNodes,excludedNodes); assertEquals(targets.length,1); assertFalse(isOnSameRack(targets[0],dataNodes[0])); dataNodes[0].setLastUpdate(Time.now()); namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck(); }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified * to be chosen. So the 1st replica should be placed on dataNodes[1], * the 2nd replica should be placed on a different rack, * the 3rd replica should be placed on the same rack as the 2nd replica, * and the rest should be placed on the third rack. * @throws Exception */ @Test public void testChooseTarget3() throws Exception { updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0); DatanodeStorageInfo[] targets; targets=chooseTarget(0); assertEquals(targets.length,0); targets=chooseTarget(1); assertEquals(targets.length,1); assertEquals(storages[1],targets[0]); targets=chooseTarget(2); assertEquals(targets.length,2); assertEquals(storages[1],targets[0]); assertFalse(isOnSameRack(targets[0],targets[1])); targets=chooseTarget(3); assertEquals(targets.length,3); assertEquals(storages[1],targets[0]); assertTrue(isOnSameRack(targets[1],targets[2])); assertFalse(isOnSameRack(targets[0],targets[1])); targets=chooseTarget(4); assertEquals(targets.length,4); assertEquals(storages[1],targets[0]); for (int i=1; i < 4; i++) { assertFalse(isOnSameRack(targets[0],targets[i])); } assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3])); assertFalse(isOnSameRack(targets[1],targets[3])); updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0); }

BooleanVerifier EqualityVerifier HybridVerifier 
/** * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale, * and when the number of replicas is less or equal to 3, all the healthy * datanodes should be returned by the chooseTarget method. When the number * of replicas is 4, a stale node should be included. * @throws Exception */ @Test public void testChooseTargetWithHalfStaleNodes() throws Exception { for (int i=0; i < 3; i++) { dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1); } namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck(); DatanodeStorageInfo[] targets=chooseTarget(0); assertEquals(targets.length,0); targets=chooseTarget(1); assertEquals(targets.length,1); assertFalse(containsWithinRange(targets[0],dataNodes,0,2)); targets=chooseTarget(2); assertEquals(targets.length,2); assertFalse(containsWithinRange(targets[0],dataNodes,0,2)); assertFalse(containsWithinRange(targets[1],dataNodes,0,2)); targets=chooseTarget(3); assertEquals(targets.length,3); assertTrue(containsWithinRange(targets[0],dataNodes,3,5)); assertTrue(containsWithinRange(targets[1],dataNodes,3,5)); assertTrue(containsWithinRange(targets[2],dataNodes,3,5)); targets=chooseTarget(4); assertEquals(targets.length,4); assertTrue(containsWithinRange(dataNodes[3],targets,0,3)); assertTrue(containsWithinRange(dataNodes[4],targets,0,3)); assertTrue(containsWithinRange(dataNodes[5],targets,0,3)); for (int i=0; i < dataNodes.length; i++) { dataNodes[i].setLastUpdate(Time.now()); } namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Verifies the stale-node avoidance heuristic: with a minority of stale
 * datanodes, writes avoid them; once more than half the nodes are stale,
 * avoidance is switched off so writes can still proceed.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  String[] hosts = new String[]{"host1", "host2", "host3", "host4", "host5",
      "host6"};
  String[] racks = new String[]{"/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2",
      "/d2/r3", "/d2/r3"};
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
      .hosts(hosts).numDataNodes(hosts.length).build();
  miniCluster.waitActive();
  try {
    // Mark the first two datanodes stale.
    for (int i = 0; i < 2; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      miniCluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager().getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
    }
    miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
    int numStaleNodes = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager().getNumStaleNodes();
    // JUnit convention: expected value first.
    assertEquals(2, numStaleNodes);
    // 2 of 6 stale: stale nodes are still avoided for writes.
    assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
    DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
        .getNamesystem().getBlockManager().getDatanodeManager()
        .getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
    BlockPlacementPolicy replicator = miniCluster.getNameNode()
        .getNamesystem().getBlockManager().getBlockPlacementPolicy();
    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3,
        staleNodeInfo, new ArrayList<DatanodeStorageInfo>(), false, null,
        BLOCK_SIZE, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));

    // Now make more than half (4 of 6) of the nodes stale.
    for (int i = 0; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      miniCluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager().getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
    }
    miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
    numStaleNodes = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager().getNumStaleNodes();
    assertEquals(4, numStaleNodes);
    // Too many stale nodes: the avoidance heuristic must switch off.
    assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
    targets = replicator.chooseTarget(filename, 3, staleNodeInfo,
        new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
        StorageType.DEFAULT);
    assertEquals(3, targets.length);
    assertTrue(isOnSameRack(targets[0], staleNodeInfo));

    // Recover two nodes so only 2 of 6 are stale again.
    for (int i = 2; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
      miniCluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager().getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now());
    }
    miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
    numStaleNodes = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager().getNumStaleNodes();
    assertEquals(2, numStaleNodes);
    assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
    targets = chooseTarget(3, staleNodeInfo);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));
  } finally {
    miniCluster.shutdown();
  }
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica,
 * @throws Exception
 */
// NOTE(review): the method name has a typo ("Chooose"); kept as-is to avoid
// changing the externally visible test name.
@Test
public void testChoooseTarget4() throws Exception {
  // Make both rack-1 nodes unqualified (not enough remaining space).
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0,
        0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(targets[i], dataNodes[0]));
  }
  assertTrue(isOnSameRack(targets[0], targets[1])
      || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));

  // Restore capacity on the rack-1 nodes.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved
 */
@Test
public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  // The default multiplier must be positive.
  assertTrue(blocksReplWorkMultiplier > 0);

  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
      "3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  // JUnit convention: expected value first.
  assertEquals(3, blocksReplWorkMultiplier);

  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
      "-1");
  // A non-positive configured value must be rejected.
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node
 * of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  // Give dataNodes[0] normal capacity but 4 active xceivers.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 4, 0);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));

  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));

  // Reset the xceiver count on dataNodes[0].
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  // NOTE(review): excludedNodes is kept raw — the element type expected by
  // chooseTarget is not visible in this chunk; confirm before generifying.
  Set excludedNodes;
  DatanodeStorageInfo[] targets;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();

  excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(0, chosenNodes, excludedNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);

  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));

  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));

  // With returnChosenNodes=true the already-chosen storage must be
  // returned together with the newly chosen target.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
      true, excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // storages[2] must appear somewhere in the returned targets.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++);
  assertTrue(i < targets.length);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[2]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], dataNodes[2]));

  // Same request but with dataNodes[2] as the writer.
  targets = chooseTarget(1, dataNodes[2], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2, dataNodes[2], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Make dataNodes[0..1] unqualified (not enough remaining space).
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0,
        0);
  }
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // Ask for more targets than there are qualified nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  // JUnit convention: expected value first.
  assertEquals(NUM_OF_DATANODES - 2, targets.length);

  // A warning about the two-node shortfall must have been logged.
  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.isEmpty());
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));

  // Restore capacity on the two nodes.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  float blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // The default percentage must be positive.
  assertTrue(blocksInvalidateWorkPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // JUnit convention: expected value first.
  assertEquals(0.5f, blocksInvalidateWorkPct, blocksInvalidateWorkPct * 1e-7);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct, blocksInvalidateWorkPct * 1e-7);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.0f");
  // A zero percentage must be rejected.
  exception.expect(IllegalArgumentException.class);
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[1], dataNodes[0]));
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyConsiderLoad

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Report xceiver counts of 2, 4 and 4 on dataNodes[3..5].
    dnManager.handleHeartbeat(dnrList.get(3),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
        blockPoolId, dataNodes[3].getCacheCapacity(),
        dataNodes[3].getCacheRemaining(), 2, 0, 0);
    dnManager.handleHeartbeat(dnrList.get(4),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),
        blockPoolId, dataNodes[4].getCacheCapacity(),
        dataNodes[4].getCacheRemaining(), 4, 0, 0);
    dnManager.handleHeartbeat(dnrList.get(5),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),
        blockPoolId, dataNodes[5].getCacheCapacity(),
        dataNodes[5].getCacheRemaining(), 4, 0, 0);
    final int load = 2 + 4 + 4;
    FSNamesystem fsn = namenode.getNamesystem();
    // Average over all 6 in-service nodes.
    assertEquals((double) load / 6, fsn.getInServiceXceiverAverage(), EPSILON);

    // Decommission dataNodes[0..2]; the average is then taken over the
    // 3 remaining in-service nodes.
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
      dnManager.startDecommission(d);
      d.setDecommissioned();
    }
    assertEquals((double) load / 3, fsn.getInServiceXceiverAverage(), EPSILON);

    // Targets must come only from the non-decommissioned nodes.
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> targetSet =
        new HashSet<DatanodeStorageInfo>(Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(targetSet.contains(storages[i]));
    }
  } finally {
    dataNodes[0].stopDecommission();
    dataNodes[1].stopDecommission();
    dataNodes[2].stopDecommission();
    namenode.getNamesystem().writeUnlock();
  }
  NameNode.LOG.info("Done working on it");
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that chooseTarget honours node dependencies: a dependent host of
 * a chosen target must not itself be chosen, and ends up excluded.
 * @throws Exception
 */
@Test
public void testChooseTargetWithDependencies() throws Exception {
  // Swap the default datanodes out for the dependency-test topology.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  Host2NodesMap host2DatanodeMap = namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().getHost2DatanodeMap();
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    cluster.add(dataNodesForDependencies[i]);
    host2DatanodeMap.add(dataNodesForDependencies[i]);
  }
  // Pair up nodes 1<->2 and 3<->4 as mutual dependencies.
  dataNodesForDependencies[1].addDependentHostName(
      dataNodesForDependencies[2].getHostName());
  dataNodesForDependencies[2].addDependentHostName(
      dataNodesForDependencies[1].getHostName());
  dataNodesForDependencies[3].addDependentHostName(
      dataNodesForDependencies[4].getHostName());
  dataNodesForDependencies[4].addDependentHostName(
      dataNodesForDependencies[3].getHostName());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    updateHeartbeatWithUsage(dataNodesForDependencies[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  DatanodeStorageInfo[] targets;
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodesForDependencies[5]);

  // Only 2 of the 3 requested targets can be found: the writer's storage
  // and one node of the 3/4 dependency pair.
  targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes,
      excludedNodes);
  // JUnit convention: expected value first.
  assertEquals(2, targets.length);
  assertEquals(storagesForDependencies[1], targets[0]);
  assertTrue(targets[1].equals(storagesForDependencies[3])
      || targets[1].equals(storagesForDependencies[4]));

  // Every node must end up excluded (chosen, dependent-of-chosen, or
  // explicitly excluded up front).
  assertEquals(NUM_OF_DATANODES_FOR_DEPENDENCIES, excludedNodes.size());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[3]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(dataNodes[3], targets[0]));

  // Same request but with dataNodes[3] as the writer.
  targets = chooseTarget(1, dataNodes[3], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));

  targets = chooseTarget(2, dataNodes[3], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack of rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // At least one of the two targets must be off the writer's rack.
  assertFalse(isOnSameRack(dataNodes[0], targets[0])
      && isOnSameRack(dataNodes[0], targets[1]));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node and nodegroup by same
 * rack as the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, chosenNodes);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));

  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group & can't be placed with two replicas
 * The 1st replica will be placed on writer.
 * The 2nd replica should be placed on a different rack
 * The 3rd replica should be placed on the same rack with writer, but on a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  // Replace the default topology with the boundary-case datanodes.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    // NOTE(review): dataNodes[0] is re-updated on every iteration, which
    // looks redundant; kept as-is to preserve the original behavior.
    updateHeartbeatWithUsage(dataNodes[0],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0,
        0);
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(1, targets.length);

  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is a node outside of file system.
 * So the 1st replica can be placed on any node.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  setupDataNodeCapacity();
  DatanodeStorageInfo[] targets;

  targets = chooseTarget(0, NODE);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, NODE);
  assertEquals(1, targets.length);

  targets = chooseTarget(2, NODE);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, NODE);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  DatanodeStorageInfo[] targets;
  BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault) replicator;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  // NOTE(review): excludedNodes kept raw — the element type expected by
  // chooseTarget is not visible in this chunk.
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);

  targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  // JUnit convention: expected value first.
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));

  // With returnChosenNodes=true, the already-chosen storage must be
  // returned together with the new target.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // storages[2] must appear somewhere in the returned targets.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++);
  assertTrue(i < targets.length);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but
 * in different nodegroup,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // Leave dataNodes[0] with too little remaining space to be chosen.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));

  // Restore dataNodes[0] to its normal capacity.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica, but
 * in different node group.
 * @throws Exception
 */
@Test
public void testChooseTarget4() throws Exception {
  // Make all rack-1 nodes unqualified (not enough remaining space).
  for (int i = 0; i < 3; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0,
        0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(dataNodes[0], targets[i]));
  }
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[0], targets[1])
      || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test replica placement policy in case of targets more than number of
 * NodeGroups.
 * The 12-nodes cluster only has 6 NodeGroups, but in some cases, like:
 * placing submitted job file, there is requirement to choose more (10)
 * targets for placing replica. We should test it can return 6 targets.
 */
@Test
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
  // Replace the default and boundary topologies with the more-targets one.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    DatanodeDescriptor node = dataNodesInBoundaryCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    cluster.add(dataNodesInMoreTargetsCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
  // JUnit convention: expected value first.
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));

  // Asking for 10 targets can only yield one per node group, i.e. 6.
  targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  assertEquals(6, targets.length);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node (and node
 * group) of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  // Give dataNodes[0] normal capacity but 4 active xceivers.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 4, 0);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // JUnit convention: expected value first.
  assertEquals(0, targets.length);

  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameNodeGroup(targets[1], targets[2]));

  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);

  // Reset the xceiver count on dataNodes[0].
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlockQueues

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that blocks inserted with different live-replica counts land in
 * the priority queue matching their degree of under-replication.
 * @throws Throwable if something goes wrong
 */
@Test
public void testBlockPriorities() throws Throwable {
  final UnderReplicatedBlocks queueSet = new UnderReplicatedBlocks();
  final Block singleReplicaBlock = new Block(1);
  final Block twoReplicaBlock = new Block(2);
  final Block veryUnderReplicatedBlock = new Block(3);
  final Block corruptBlock = new Block(4);

  // One live replica out of three: highest priority queue.
  assertAdded(queueSet, singleReplicaBlock, 1, 0, 3);
  assertEquals(1, queueSet.getUnderReplicatedBlockCount());
  assertEquals(1, queueSet.size());
  assertInLevel(queueSet, singleReplicaBlock,
      UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
  // Re-adding an already-queued block is a no-op.
  assertFalse(queueSet.add(singleReplicaBlock, 1, 0, 3));

  // Two of three replicas: the ordinary under-replicated queue.
  assertAdded(queueSet, twoReplicaBlock, 2, 0, 3);
  assertEquals(2, queueSet.getUnderReplicatedBlockCount());
  assertEquals(2, queueSet.size());
  assertInLevel(queueSet, twoReplicaBlock,
      UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);

  // Zero live replicas: counted as corrupt, not under-replicated.
  assertAdded(queueSet, corruptBlock, 0, 0, 3);
  assertEquals(3, queueSet.size());
  assertEquals(2, queueSet.getUnderReplicatedBlockCount());
  assertEquals(1, queueSet.getCorruptBlockSize());
  assertInLevel(queueSet, corruptBlock,
      UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);

  // Only 4 of 25 expected replicas: very-under-replicated queue.
  assertAdded(queueSet, veryUnderReplicatedBlock, 4, 0, 25);
  assertInLevel(queueSet, veryUnderReplicatedBlock,
      UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
}

Class: org.apache.hadoop.hdfs.server.common.TestGetUriFromString

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for an OS dependent absolute paths.
 * @throws IOException
 */
@Test
public void testAbsolutePathAsURI() throws IOException {
  // Message fix: a space was missing before the interpolated path in both
  // assertion messages.
  URI u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
  assertNotNull(
      "Uri should not be null for Windows path " + ABSOLUTE_PATH_WINDOWS, u);
  assertEquals(URI_FILE_SCHEMA, u.getScheme());

  u = Util.stringAsURI(ABSOLUTE_PATH_UNIX);
  assertNotNull(
      "Uri should not be null for Unix path " + ABSOLUTE_PATH_UNIX, u);
  assertEquals(URI_FILE_SCHEMA, u.getScheme());
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for a URI
 * @throws IOException
 */
@Test
public void testURI() throws IOException {
  // A well-formed Unix file URI round-trips with scheme and path intact.
  LOG.info("Testing correct Unix URI: " + URI_UNIX);
  URI unixUri = Util.stringAsURI(URI_UNIX);
  LOG.info("Uri: " + unixUri);
  assertNotNull("Uri should not be null at this point", unixUri);
  assertEquals(URI_FILE_SCHEMA, unixUri.getScheme());
  assertEquals(URI_PATH_UNIX, unixUri.getPath());

  // Same for a Windows file URI; %20 escapes decode to spaces in the path.
  LOG.info("Testing correct windows URI: " + URI_WINDOWS);
  URI windowsUri = Util.stringAsURI(URI_WINDOWS);
  LOG.info("Uri: " + windowsUri);
  assertNotNull("Uri should not be null at this point", windowsUri);
  assertEquals(URI_FILE_SCHEMA, windowsUri.getScheme());
  assertEquals(URI_PATH_WINDOWS.replace("%20", " "), windowsUri.getPath());
}

Class: org.apache.hadoop.hdfs.server.common.TestJspHelper

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that JspHelper.getUGI derives the caller's identity from a
 * delegation token regardless of any authenticated-user header, and that a
 * conflicting doAs parameter is rejected.
 */
@Test
public void testGetUgiFromToken() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;

  // Build a delegation token owned by "user" on behalf of "realUser".
  Text ownerText = new Text(user);
  DelegationTokenIdentifier dtId =
      new DelegationTokenIdentifier(ownerText, ownerText, new Text(realUser));
  Token token = new Token(dtId, new DummySecretManager(0, 0, 0, 0));
  String tokenString = token.encodeToUrlString();

  // Token with no auth-ed user: identity comes entirely from the token.
  request = getMockRequest(null, null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  // BUG FIX: assertEquals takes (expected, actual); the original passed the
  // arguments reversed, which produces misleading failure messages.
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromToken(ugi);

  // Token with the matching auth-ed user: token still determines identity.
  request = getMockRequest(realUser, null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromToken(ugi);

  // Token with a completely different auth-ed user: token wins.
  request = getMockRequest("rogue", null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromToken(ugi);

  // Token with a username parameter matching the token owner: accepted.
  request = getMockRequest(null, user, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromToken(ugi);

  // doAs that conflicts with the token owner must be rejected.
  request = getMockRequest(null, null, "rogue");
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=rogue != expected=" + user, ioe.getMessage());
  }

  // Matching username but conflicting doAs is rejected as well.
  request = getMockRequest(null, user, "rogue");
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=rogue != expected=" + user, ioe.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies non-proxy UGI resolution: with Kerberos enabled a request is only
 * accepted when a filter-authenticated user is present, and any username
 * parameter must match it.
 */
@Test
public void testGetNonProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;

  // No auth-ed user at all: must be rejected.
  request = getMockRequest(null, null, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter", ioe.getMessage());
  }

  // Username parameter without an auth-ed user: still rejected.
  request = getMockRequest(null, realUser, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter", ioe.getMessage());
  }

  // Auth-ed user alone is sufficient; the UGI has no real (proxy) user.
  request = getMockRequest(realUser, null, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  // BUG FIX: assertEquals takes (expected, actual); arguments were reversed.
  Assert.assertEquals(realUser, ugi.getShortUserName());
  checkUgiFromAuth(ugi);

  // Auth-ed user with a matching username parameter: accepted.
  request = getMockRequest(realUser, realUser, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(realUser, ugi.getShortUserName());
  checkUgiFromAuth(ugi);

  // Username parameter that conflicts with the auth-ed user: rejected.
  request = getMockRequest(realUser, user, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=" + user + " != expected=" + realUser,
        ioe.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies that every ReplicaState value survives a write/read round trip
 * through the Writable serialization buffers.
 */
@Test
public void testReadWriteReplicaState() {
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    for (HdfsServerConstants.ReplicaState repState
        : HdfsServerConstants.ReplicaState.values()) {
      // Serialize the state and read it back from the same bytes.
      repState.write(out);
      in.reset(out.getData(), out.getLength());
      HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState.read(in);
      assertTrue("testReadWrite error !!!", repState == result);
      // Reset both buffers so each state is tested in isolation.
      out.reset();
      in.reset();
    }
  } catch (Exception ex) {
    // BUG FIX: the original fail() message discarded the exception entirely,
    // making failures undiagnosable; include the cause in the message.
    fail("testReadWrite ex error ReplicaState: "
        + StringUtils.stringifyException(ex));
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies proxy-user (doAs) UGI resolution: an authenticated superuser may
 * impersonate another user, while unauthenticated, mismatched, or
 * unauthorized impersonation attempts are rejected.
 */
@Test
public void testGetProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  // Allow realUser to impersonate anyone from anywhere.
  conf.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(realUser), "*");
  conf.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(realUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;

  // doAs without an auth-ed user: rejected.
  request = getMockRequest(null, null, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter", ioe.getMessage());
  }

  // doAs plus username parameter, still no auth-ed user: rejected.
  request = getMockRequest(null, realUser, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter", ioe.getMessage());
  }

  // Auth-ed superuser doing doAs: accepted, with a real-user chain.
  request = getMockRequest(realUser, null, user);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  // BUG FIX: assertEquals takes (expected, actual); arguments were reversed.
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromAuth(ugi);

  // Same, with a matching username parameter.
  request = getMockRequest(realUser, realUser, user);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user, ugi.getShortUserName());
  checkUgiFromAuth(ugi);

  // Username parameter conflicting with the auth-ed user: rejected.
  request = getMockRequest(realUser, user, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=" + user + " != expected=" + realUser,
        ioe.getMessage());
  }

  // A non-superuser attempting impersonation: authorization failure.
  try {
    request = getMockRequest(user, null, realUser);
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad proxy request allowed");
  } catch (AuthorizationException ae) {
    Assert.assertEquals(
        "User: " + user + " is not allowed to impersonate " + realUser,
        ae.getMessage());
  }
  try {
    request = getMockRequest(user, user, realUser);
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad proxy request allowed");
  } catch (AuthorizationException ae) {
    Assert.assertEquals(
        "User: " + user + " is not allowed to impersonate " + realUser,
        ae.getMessage());
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): end-to-end block-report test on a single-DN cluster: it picks
// two random blocks of a freshly written file, unfinalizes and deletes their
// on-disk files, forces a block report, triggers NN replication work, and then
// expects missing == under-replicated == number of removed blocks. Left
// byte-identical: the delete/report/recompute sequence is order-sensitive.
/** * Test write a file, verifies and closes it. Then a couple of random blocks * is removed and BlockReport is forced; the FSNamesystem is pushed to * recalculate required DN's activities such as replications and so on. * The number of missing and under-replicated blocks should be the same in * case of a single-DN cluster. * @throws IOException in case of errors */ @Test(timeout=300000) public void blockReport_02() throws IOException { final String METHOD_NAME=GenericTestUtils.getMethodName(); LOG.info("Running test " + METHOD_NAME); Path filePath=new Path("/" + METHOD_NAME + ".dat"); DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong()); File dataDir=new File(cluster.getDataDirectory()); assertTrue(dataDir.isDirectory()); List blocks2Remove=new ArrayList(); List removedIndex=new ArrayList(); List lBlocks=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_START,FILE_SIZE).getLocatedBlocks(); while (removedIndex.size() != 2) { int newRemoveIndex=rand.nextInt(lBlocks.size()); if (!removedIndex.contains(newRemoveIndex)) removedIndex.add(newRemoveIndex); } for ( Integer aRemovedIndex : removedIndex) { blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock()); } if (LOG.isDebugEnabled()) { LOG.debug("Number of blocks allocated " + lBlocks.size()); } final DataNode dn0=cluster.getDataNodes().get(DN_N0); for ( ExtendedBlock b : blocks2Remove) { if (LOG.isDebugEnabled()) { LOG.debug("Removing the block " + b.getBlockName()); } for ( File f : findAllFiles(dataDir,new MyFileFilter(b.getBlockName(),true))) { DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b); if (!f.delete()) { LOG.warn("Couldn't delete " + b.getBlockName()); } else { LOG.debug("Deleted file " + f.toString()); } } } waitTil(DN_RESCAN_EXTRA_WAIT); String poolId=cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR=dn0.getDNRegistrationForBP(poolId); StorageBlockReport[] reports=getBlockReports(dn0,poolId,false,false); sendBlockReports(dnR,poolId,reports); 
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager()); printStats(); assertEquals("Wrong number of MissingBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getMissingBlocksCount()); assertEquals("Wrong number of UnderReplicatedBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getUnderReplicatedBlocks()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestBPOfferService

APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier 
// NOTE(review): drives the DN's active-NN election by mutating the shared
// mockHaStatuses array between triggered heartbeats: the NN reporting ACTIVE
// with the highest txid is chosen, a stale ACTIVE claim loses to a newer one,
// and a lone STANDBY leaves no active NN. Left byte-identical: each assertion
// depends on the exact heartbeat/state ordering that precedes it.
/** * Test that the DataNode determines the active NameNode correctly * based on the HA-related information in heartbeat responses. * See HDFS-2627. */ @Test public void testPickActiveNameNode() throws Exception { BPOfferService bpos=setupBPOSForNNs(mockNN1,mockNN2); bpos.start(); try { waitForInitialization(bpos); assertNull(bpos.getActiveNN()); mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,1); bpos.triggerHeartbeatForTests(); assertSame(mockNN1,bpos.getActiveNN()); mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,2); bpos.triggerHeartbeatForTests(); assertSame(mockNN2,bpos.getActiveNN()); bpos.triggerHeartbeatForTests(); assertSame(mockNN2,bpos.getActiveNN()); mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.STANDBY,2); bpos.triggerHeartbeatForTests(); assertNull(bpos.getActiveNN()); mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,3); bpos.triggerHeartbeatForTests(); assertSame(mockNN1,bpos.getActiveNN()); } finally { bpos.stop(); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery

TestInitializer EqualityVerifier HybridVerifier 
// NOTE(review): @Before fixture that builds a real DataNode against a mocked
// NN RPC translator: registerDatanode echoes its argument, versionRequest and
// sendHeartbeat return canned responses, and connectToNN is overridden to hand
// back the mock (asserting the expected NN address). Left byte-identical: the
// Mockito stubbing and the DataNode construction are tightly order-coupled.
/** * Starts an instance of DataNode * @throws IOException */ @Before public void startUp() throws IOException, URISyntaxException { tearDownDone=false; conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,DATA_DIR); conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,"0.0.0.0:0"); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0); FileSystem.setDefaultUri(conf,"hdfs://" + NN_ADDR.getHostName() + ":"+ NN_ADDR.getPort()); ArrayList locations=new ArrayList(); File dataDir=new File(DATA_DIR); FileUtil.fullyDelete(dataDir); dataDir.mkdirs(); StorageLocation location=StorageLocation.parse(dataDir.getPath()); locations.add(location); final DatanodeProtocolClientSideTranslatorPB namenode=mock(DatanodeProtocolClientSideTranslatorPB.class); Mockito.doAnswer(new Answer(){ @Override public DatanodeRegistration answer( InvocationOnMock invocation) throws Throwable { return (DatanodeRegistration)invocation.getArguments()[0]; } } ).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class)); when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1,CLUSTER_ID,POOL_ID,1L)); when(namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),Mockito.any(StorageReport[].class),Mockito.anyLong(),Mockito.anyLong(),Mockito.anyInt(),Mockito.anyInt(),Mockito.anyInt())).thenReturn(new HeartbeatResponse(new DatanodeCommand[0],new NNHAStatusHeartbeat(HAServiceState.ACTIVE,1),null)); dn=new DataNode(conf,locations,null){ @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { Assert.assertEquals(NN_ADDR,nnAddr); return namenode; } } ; dn.getAllBpOs()[0].triggerHeartbeatForTests(); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * BlockRecovery_02.8.
 * Two replicas are in Finalized state
 * @throws IOException in case of an error
 */
@Test
public void testFinalizedReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Two finalized replicas of equal length: both get synced to that length.
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.FINALIZED);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);

  // Two finalized replicas of different lengths: recovery must fail.
  replica1 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
  replica2 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.FINALIZED);
  try {
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    // Fixed typo in the developer-facing message ("lengthes" -> "lengths").
    Assert.fail("Two finalized replicas should not have different lengths!");
  } catch (IOException e) {
    Assert.assertTrue(
        e.getMessage().startsWith("Inconsistent size of finalized replicas. "));
  }
}

TestCleaner BranchVerifier BooleanVerifier HybridVerifier 
/**
 * Cleans the resources and closes the instance of datanode
 * @throws IOException if an error occurred
 */
@After
public void tearDown() throws IOException {
  // Nothing to do if cleanup already ran or the datanode never started.
  if (tearDownDone || dn == null) {
    return;
  }
  try {
    dn.shutdown();
  } catch (Exception e) {
    LOG.error("Cannot close: ", e);
  } finally {
    // Always remove the data directories, even if shutdown failed.
    File dir = new File(DATA_DIR);
    if (dir.exists()) {
      Assert.assertTrue("Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
    }
  }
  tearDownDone = true;
}

Class: org.apache.hadoop.hdfs.server.datanode.TestBlockReplacement

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises block replacement between datanodes: moving a block to a node
 * that is not a current replica holder must succeed, while moves to a node
 * that already holds the block must be rejected.
 */
@Test
public void testBlockReplacement() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
  final String[] NEW_RACKS = {"/RACK2"};
  final short REPLICATION_FACTOR = (short) 3;
  final int DEFAULT_BLOCK_SIZE = 1024;
  final Random r = new Random();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 500);
  cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path fileName = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, fileName, DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR,
        r.nextLong());
    DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);

    InetSocketAddress addr =
        new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, CONF);
    List<LocatedBlock> locatedBlocks = client.getNamenode()
        .getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
    assertEquals(1, locatedBlocks.size());
    LocatedBlock block = locatedBlocks.get(0);
    DatanodeInfo[] oldNodes = block.getLocations();
    // Fixed: assertEquals takes (expected, actual); arguments were reversed.
    assertEquals(3, oldNodes.length);
    ExtendedBlock b = block.getBlock();

    // Add a fourth datanode on an existing rack.
    cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
    cluster.waitActive();
    DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);

    // Locate the node that holds no replica of the block.
    DatanodeInfo newNode = null;
    for (DatanodeInfo node : datanodes) {
      // Fixed: use a primitive boolean flag instead of a boxed Boolean.
      boolean isNewNode = true;
      for (DatanodeInfo oldNode : oldNodes) {
        if (node.equals(oldNode)) {
          isNewNode = false;
          break;
        }
      }
      if (isNewNode) {
        newNode = node;
        break;
      }
    }
    // Fixed: assertNotNull reports the intent better than assertTrue(x != null).
    assertNotNull(newNode);

    // Classify the remaining nodes: the rack-mate of newNode is the source,
    // the other two act as proxies.
    DatanodeInfo source = null;
    List<DatanodeInfo> proxies = new ArrayList<DatanodeInfo>(2);
    for (DatanodeInfo node : datanodes) {
      if (node != newNode) {
        if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
          source = node;
        } else {
          proxies.add(node);
        }
      }
    }
    assertNotNull(source);
    assertEquals(2, proxies.size());

    LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
    assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
    LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block " + b);
    assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
    LOG.info("Testcase 3: Source=" + source + " Proxy=" + proxies.get(0)
        + " Destination=" + newNode);
    assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
    checkBlocks(new DatanodeInfo[]{newNode, proxies.get(0), proxies.get(1)},
        fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
    assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
    checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]),
        fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataDirs

APIUtilityVerifier UtilityVerifier InternalCallVerifier ConditionMatcher HybridVerifier 
/**
 * Parses dfs.datanode.data.dir entries of the form [storagetype]/path:
 * storage-type tags are case-insensitive, an unknown tag is rejected, and
 * untagged entries default to DISK.
 */
@Test(timeout = 30000)
public void testDataDirParsing() throws Throwable {
  Configuration conf = new Configuration();
  List<StorageLocation> locations;
  File dir0 = new File("/dir0");
  File dir1 = new File("/dir1");
  File dir2 = new File("/dir2");
  File dir3 = new File("/dir3");

  // Tags are matched case-insensitively.
  String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3";
  conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
  locations = DataNode.getStorageLocations(conf);
  assertThat(locations.size(), is(4));
  assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
  assertThat(locations.get(0).getUri(), is(dir0.toURI()));
  assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
  assertThat(locations.get(1).getUri(), is(dir1.toURI()));
  assertThat(locations.get(2).getStorageType(), is(StorageType.SSD));
  assertThat(locations.get(2).getUri(), is(dir2.toURI()));
  assertThat(locations.get(3).getStorageType(), is(StorageType.DISK));
  assertThat(locations.get(3).getUri(), is(dir3.toURI()));

  // An unrecognized storage type must be rejected.
  String locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
  conf.set(DFS_DATANODE_DATA_DIR_KEY, locations2);
  try {
    locations = DataNode.getStorageLocations(conf);
    // Fixed: a bare fail() gave no hint about what was expected.
    fail("expected IllegalArgumentException for bad storage type in " + locations2);
  } catch (IllegalArgumentException iae) {
    DataNode.LOG.info("The exception is expected.", iae);
  }

  // Untagged entries default to DISK.
  String locations3 = "/dir0,/dir1";
  conf.set(DFS_DATANODE_DATA_DIR_KEY, locations3);
  locations = DataNode.getStorageLocations(conf);
  assertThat(locations.size(), is(2));
  assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
  assertThat(locations.get(0).getUri(), is(dir0.toURI()));
  assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
  assertThat(locations.get(1).getUri(), is(dir1.toURI()));
}

APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier 
/**
 * Only the directories that pass the disk check should survive validation:
 * the mocked checker fails the first two and accepts the third.
 */
@Test(timeout = 30000)
public void testDataDirValidation() throws Throwable {
  DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
  // First two checkDir calls throw, the third succeeds.
  doThrow(new IOException()).doThrow(new IOException()).doNothing()
      .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
  LocalFileSystem fs = mock(LocalFileSystem.class);

  AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();
  locations.add(StorageLocation.parse("file:/p1/"));
  locations.add(StorageLocation.parse("file:/p2/"));
  locations.add(StorageLocation.parse("file:/p3/"));

  List<StorageLocation> checkedLocations =
      DataNode.checkStorageLocations(locations, fs, diskChecker);
  assertEquals("number of valid data dirs", 1, checkedLocations.size());
  String validDir = checkedLocations.iterator().next().getFile().getPath();
  assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeMetrics

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured.
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow the writer down so the pipeline stays observable long enough.
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);

    // Poll for up to ~5 seconds for the pipeline to be established.
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // BUG FIX: the original indexed pipeline[0] unconditionally, so a pipeline
    // that never materialized produced an opaque NullPointerException.
    assertNotNull("Pipeline was not created within 5 seconds", pipeline);

    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline",
        headNode);
    // Wait past one percentile interval so the quantile gauges are populated.
    Thread.sleep((interval + 1) * 1000);
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",
        dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeMultipleRegistrations

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): registers a DN against an HA pair, then reformats both NNs
// under a new cluster id ("cluster-2") while the DN is stopped; on restart the
// DN's single (HA-shared) BPOfferService must fail its handshake and the DN
// shuts itself down. Left byte-identical: the stop/format/restart choreography
// is order-sensitive. The fixed 10s sleeps look flaky — consider polling for
// registration instead; TODO confirm before changing.
@Test public void testDNWithInvalidStorageWithHA() throws Exception { MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1")).addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1"))); top.setFederation(true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build(); try { cluster.startDataNodes(conf,1,true,null,null); Thread.sleep(10000); DataNode dn=cluster.getDataNodes().get(0); assertTrue("Datanode should be running",dn.isDatanodeUp()); assertEquals("BPOfferService should be running",1,dn.getAllBpOs().length); DataNodeProperties dnProp=cluster.stopDataNode(0); cluster.getNameNode(0).stop(); cluster.getNameNode(1).stop(); Configuration nn1=cluster.getConfiguration(0); Configuration nn2=cluster.getConfiguration(1); StartupOption.FORMAT.setClusterId("cluster-2"); DFSTestUtil.formatNameNode(nn1); MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),FSNamesystem.getNamespaceDirs(nn2),nn2); cluster.restartNameNode(0,false); cluster.restartNameNode(1,false); cluster.restartDataNode(dnProp); Thread.sleep(10000); dn=cluster.getDataNodes().get(0); assertFalse("Datanode should have shutdown as only service failed",dn.isDatanodeUp()); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * starts single nn and single dn and verifies registration and handshake
 * @throws IOException
 */
@Test
public void testFedSingleNN() throws IOException {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
  try {
    NameNode nn1 = cluster.getNameNode();
    assertNotNull("cannot create nn1", nn1);
    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
        + nn1.getNameNodeAddress());

    DataNode dn = cluster.getDataNodes().get(0);
    final Map volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    assertEquals("number of volumes is wrong", 2, volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
          + bpos.bpRegistration.getDatanodeUuid() + "; nna="
          + getNNSocketAddress(bpos));
    }

    BPOfferService bpos1 = dn.getAllBpOs()[0];
    bpos1.triggerBlockReportForTests();
    // BUG FIX: assertEquals takes (expected, actual); the original passed the
    // arguments reversed on the next three assertions.
    assertEquals("wrong nn address", nn1.getNameNodeAddress(),
        getNNSocketAddress(bpos1));
    assertEquals("wrong bpid", bpid1, bpos1.getBlockPoolId());
    assertEquals("wrong cid", cid1, dn.getClusterId());
    cluster.shutdown();
    // After shutdown no block-pool services should remain on the DN.
    assertEquals(0, dn.getAllBpOs().length);
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): three sub-scenarios in one test: (1) adding a NN to a
// 2-NN federated cluster succeeds, (2) adding a NN to a 1-NN federated
// cluster succeeds, (3) adding a NN to a non-federated cluster must fail
// with "cannot add namenode". Left byte-identical: each scenario's
// build/waitActive/addNameNode/shutdown ordering matters.
@Test public void testMiniDFSClusterWithMultipleNN() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build(); try { cluster.waitActive(); Assert.assertEquals("(1)Should be 2 namenodes",2,cluster.getNumNameNodes()); cluster.addNameNode(conf,0); Assert.assertEquals("(1)Should be 3 namenodes",3,cluster.getNumNameNodes()); } catch ( IOException ioe) { Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1)).build(); try { Assert.assertNotNull(cluster); cluster.waitActive(); Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes()); cluster.addNameNode(conf,0); Assert.assertEquals("(2)Should be 2 namenodes",2,cluster.getNumNameNodes()); } catch ( IOException ioe) { Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); Assert.assertNotNull(cluster); Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes()); cluster.addNameNode(conf,9929); Assert.fail("shouldn't be able to add another NN to non federated cluster"); } catch ( IOException e) { Assert.assertTrue(e.getMessage().startsWith("cannot add namenode")); Assert.assertEquals("(3)Should be 1 namenodes",1,cluster.getNumNameNodes()); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): two federated HA nameservices, the second formatted with a
// deliberately mismatched cluster id ("bad-cid"); the DN must come up with
// exactly one surviving BPOfferService (for ns1) and stay alive. Left
// byte-identical; the fixed 10s sleep is a potential flakiness source —
// TODO confirm before replacing with a registration poll.
@Test(timeout=20000) public void testClusterIdMismatchAtStartupWithHA() throws Exception { MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0")).addNN(new MiniDFSNNTopology.NNConf("nn1"))).addNameservice(new MiniDFSNNTopology.NSConf("ns2").addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid")).addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid"))); top.setFederation(true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build(); try { cluster.startDataNodes(conf,1,true,null,null); Thread.sleep(10000); DataNode dn=cluster.getDataNodes().get(0); assertTrue("Datanode should be running",dn.isDatanodeUp()); assertEquals("Only one BPOfferService should be running",1,dn.getAllBpOs().length); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * start multiple NNs and single DN and verifies per BP registrations and
 * handshakes.
 * @throws IOException
 */
@Test
public void test2NNRegistration() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);

    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
    // BUG FIX: assertNotSame on two ints compares boxed object references, so
    // it could never fail even for equal ids; compare the values instead.
    assertTrue("namespace ids should be different", ns1 != ns2);
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
        + nn1.getNameNodeAddress());
    LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri="
        + nn2.getNameNodeAddress());

    DataNode dn = cluster.getDataNodes().get(0);
    final Map volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    assertEquals("number of volumes is wrong", 2, volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("BP: " + bpos);
    }

    BPOfferService bpos1 = dn.getAllBpOs()[0];
    BPOfferService bpos2 = dn.getAllBpOs()[1];
    // The BPOS array is unordered; swap so bpos1 corresponds to nn1.
    if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
      BPOfferService tmp = bpos1;
      bpos1 = bpos2;
      bpos2 = tmp;
    }
    // BUG FIX: assertEquals takes (expected, actual); the original passed the
    // arguments reversed throughout the following assertions.
    assertEquals("wrong nn address", nn1.getNameNodeAddress(),
        getNNSocketAddress(bpos1));
    assertEquals("wrong nn address", nn2.getNameNodeAddress(),
        getNNSocketAddress(bpos2));
    assertEquals("wrong bpid", bpid1, bpos1.getBlockPoolId());
    assertEquals("wrong bpid", bpid2, bpos2.getBlockPoolId());
    assertEquals("wrong cid", cid1, dn.getClusterId());
    assertEquals("cid should be same", cid2, cid1);
    assertEquals("namespace should be same", ns1, bpos1.bpNSInfo.namespaceID);
    assertEquals("namespace should be same", ns2, bpos2.bpNSInfo.namespaceID);
  } finally {
    cluster.shutdown();
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a DN registers with NameNodes added to a federated cluster,
 * but refuses to register with an NN formatted with a different cluster ID.
 *
 * @throws Exception on cluster failure
 */
@Test
public void testClusterIdMismatch() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    BPOfferService[] bposs = dn.getAllBpOs();
    LOG.info("dn bpos len (should be 2):" + bposs.length);
    // JUnit convention: expected value first, actual value second.
    Assert.assertEquals("should've registered with two namenodes", 2, bposs.length);
    // Add a third NN with the same cluster ID; the DN should register with it.
    cluster.addNameNode(conf, 9938);
    Thread.sleep(500); // give the DN time to notice the new NN and register
    bposs = dn.getAllBpOs();
    LOG.info("dn bpos len (should be 3):" + bposs.length);
    Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
    // Add a fourth NN formatted with a DIFFERENT cluster ID; the DN must not
    // register with it, so the BPOfferService count stays at 3.
    StartupOption.FORMAT.setClusterId("DifferentCID");
    cluster.addNameNode(conf, 9948);
    NameNode nn4 = cluster.getNameNode(3);
    assertNotNull("cannot create nn4", nn4);
    Thread.sleep(500);
    bposs = dn.getAllBpOs();
    LOG.info("dn bpos len (still should be 3):" + bposs.length);
    Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting

InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that individual volume failures do not cause DNs to fail, that
 * all volumes failed on a single datanode do cause it to fail, and
 * that the capacities and liveness are adjusted correctly in the NN.
 */
@Test
public void testSuccessiveVolumeFailures() throws Exception {
  // Volume failures are simulated via chmod, which has no effect on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Bring up two more datanodes (three total) and wait for heartbeats.
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  Thread.sleep(WAIT_FOR_HEARTBEATS);
  final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  // Each DN has two data volumes under dataDir named "data1".."data6";
  // volume M of DN N (0-based) lives in "data" + (2*N + M).
  File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
  File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
  File dn3Vol1=new File(dataDir,"data" + (2 * 2 + 1));
  File dn3Vol2=new File(dataDir,"data" + (2 * 2 + 2));
  // Fail a single volume on DN1 and DN2 by making it non-executable.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
  // Writing a file replicated to all three DNs makes them touch (and
  // therefore detect) the failed volumes.
  Path file1=new Path("/test1");
  DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file1,(short)3);
  ArrayList dns=cluster.getDataNodes();
  // A single failed volume must not bring a DN down.
  assertTrue("DN1 should be up",dns.get(0).isDatanodeUp());
  assertTrue("DN2 should be up",dns.get(1).isDatanodeUp());
  assertTrue("DN3 should be up",dns.get(2).isDatanodeUp());
  // DN1 and DN2 each recorded exactly one volume failure; DN3 none.
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(0).getMetrics().name()));
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(1).getMetrics().name()));
  assertCounter("VolumeFailures",0L,getMetrics(dns.get(2).getMetrics().name()));
  // Sanity check on the test's own timing constants (only runs with -ea).
  assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
  // NN view: 3 live, 0 dead, 2 failed volumes, capacity reduced accordingly.
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Now fail one of DN3's two volumes as well.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,false));
  Path file2=new Path("/test2");
  DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file2,(short)3);
  // DN3 survives a single volume failure.
  assertTrue("DN3 should still be up",dns.get(2).isDatanodeUp());
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(2).getMetrics().name()));
  ArrayList live=new ArrayList();
  ArrayList dead=new ArrayList();
  dm.fetchDatanodes(live,dead,false);
  // Fetch twice: clear and re-fetch so the lists reflect the latest state.
  live.clear();
  dead.clear();
  dm.fetchDatanodes(live,dead,false);
  assertEquals("DN3 should have 1 failed volume",1,live.get(2).getVolumeFailures());
  // Re-read per-DN capacity, which shrank after the volume failures.
  dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,3,origCapacity - (3 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Fail DN3's remaining volume; losing ALL volumes must kill the DN.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,false));
  Path file3=new Path("/test3");
  DFSTestUtil.createFile(fs,file3,1024,(short)3,1L);
  // Only two DNs remain usable, so replication can reach at most 2.
  DFSTestUtil.waitReplication(fs,file3,(short)2);
  DFSTestUtil.waitForDatanodeDeath(dns.get(2));
  assertCounter("VolumeFailures",2L,getMetrics(dns.get(2).getMetrics().name()));
  // NN view: 2 live, 1 dead, 2 failed volumes on the surviving nodes.
  DFSTestUtil.waitForDatanodeStatus(dm,2,1,2,origCapacity - (4 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Restore all volumes and restart the DNs; the cluster must fully recover.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,true));
  cluster.restartDataNodes();
  cluster.waitActive();
  Path file4=new Path("/test4");
  DFSTestUtil.createFile(fs,file4,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file4,(short)3);
  // Full capacity and zero volume failures after recovery.
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,0,origCapacity,WAIT_FOR_HEARTBEATS);
}

BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Test that the NN re-learns of volume failures after restart.
 */
@Test
public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
  // Volume failures are simulated via chmod, which has no effect on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  // Fail the first volume on DN1 and DN2 (dataDir/data1 and dataDir/data3).
  File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
  File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
  // Writing a file makes the DNs touch (and therefore notice) the failures.
  Path file1=new Path("/test1");
  DFSTestUtil.createFile(fs,file1,1024,(short)2,1L);
  DFSTestUtil.waitReplication(fs,file1,(short)2);
  // NN view before restart: 3 live, 0 dead, 2 failed volumes, less capacity.
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Restart the NN; the failure stats must be re-learned from DN heartbeats
  // and match the pre-restart numbers.
  cluster.restartNameNode(0);
  cluster.waitActive();
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureToleration

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that a volume that is considered failed on startup is seen as
 * a failed volume by the NN.
 */
@Test
public void testFailedVolumeOnStartupIsCounted() throws Exception {
  // Volume failure is simulated via chmod, which has no effect on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  final DatanodeManager dm = cluster.getNamesystem().getBlockManager()
      .getDatanodeManager();
  long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
  File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");
  try {
    // Make the storage directory unusable before the DN starts.
    prepareDirToFail(dir);
    restartDatanodes(1, false);
    // assertTrue reads better than assertEquals(true, ...) for a boolean.
    assertTrue(cluster.getDataNodes().get(0)
        .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
    // NN view: 1 live DN with 1 failed volume and half the original capacity.
    DFSTestUtil.waitForDatanodeStatus(dm, 1, 0, 1, origCapacity / 2,
        WAIT_FOR_HEARTBEATS);
  } finally {
    // Restore permissions so the directory can be cleaned up afterwards.
    FileUtil.chmod(dir.toString(), "755");
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up.
 */
@Test
public void testValidVolumesAtStartup() throws Exception {
  // Volume failure is simulated via chmod, which has no effect on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  cluster.shutdownDataNodes();
  // Tolerate exactly one failed volume.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,1);
  // Build one good data directory and one that is prepared to fail.
  File tld=new File(MiniDFSCluster.getBaseDirectory(),"badData");
  File dataDir1=new File(tld,"data1");
  File dataDir1Actual=new File(dataDir1,"1");
  dataDir1Actual.mkdirs();
  File dataDir2=new File(tld,"data2");
  prepareDirToFail(dataDir2);
  File dataDir2Actual=new File(dataDir2,"2");
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
  cluster.startDataNodes(conf,1,false,null,null);
  cluster.waitActive();
  try {
    // The DN must come up using only the good directory.
    assertTrue("The DN should have started up fine.",cluster.isDataNodeUp());
    DataNode dn=cluster.getDataNodes().get(0);
    String si=DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
    assertTrue("The DN should have started with this directory",si.contains(dataDir1Actual.getPath()));
    assertFalse("The DN shouldn't have a bad directory.",si.contains(dataDir2Actual.getPath()));
  } finally {
    cluster.shutdownDataNodes();
    // Restore permissions so the bad directory can be cleaned up.
    FileUtil.chmod(dataDir2.toString(),"755");
  }
}

BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN shuts itself down when the number of failures
 * experienced drops below the tolerated amount.
 */
@Test
public void testConfigureMinValidVolumes() throws Exception {
  // Volume failure is simulated via chmod, which has no effect on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Tolerate zero failed volumes: any failure must take the DN down.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,0);
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  // Fail DN2's first volume (dataDir/data3).
  File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
  Path file1=new Path("/test1");
  DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
  // With DN2 dead, replication can reach at most 2.
  DFSTestUtil.waitReplication(fs,file1,(short)2);
  // NN view: 2 live, 1 dead, 0 failed volumes among the live nodes.
  DFSTestUtil.waitForDatanodeStatus(dm,2,1,0,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Restore the volume; the cluster should allow new writes again.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
  Path file2=new Path("/test2");
  DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file2,(short)2);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataStorage

TestInitializer BooleanVerifier HybridVerifier 
/**
 * Creates a fresh DataStorage and NamespaceInfo and (re)creates an empty
 * test directory before each test.
 *
 * @throws IOException if the test directory cannot be recreated
 */
@Before
public void setUp() throws IOException {
  storage=new DataStorage();
  nsInfo=new NamespaceInfo(0,CLUSTER_ID,DEFAULT_BPID,CTIME,BUILD_VERSION,SOFTWARE_VERSION);
  // Start every test from a clean, empty directory.
  FileUtil.fullyDelete(TEST_DIR);
  assertTrue("Failed to make test dir.",TEST_DIR.mkdirs());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * recoverTransitionRead() must reject a set of storage locations that are
 * all non-existent, and must not register any storage directory.
 */
@Test
public void testRecoverTransitionReadFailure() throws IOException {
  final int dirCount = 3;
  // All of these locations are created in the NON_EXISTENT state.
  List missingDirs = createStorageLocations(dirCount, true);
  try {
    storage.recoverTransitionRead(mockDN, nsInfo, missingDirs, START_OPT);
    fail("An IOException should throw: all StorageLocations are NON_EXISTENT");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "All specified directories are not accessible or do not exist.", e);
  }
  // Nothing should have been added to the storage directory list.
  assertEquals(0, storage.getNumStorageDirs());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * This test enforces the behavior that if there is an exception from
 * doTransition() during DN starts up, the storage directories that have
 * already been processed are still visible, i.e., in
 * DataStorage.storageDirs().
 */
@Test
public void testRecoverTransitionReadDoTransitionFailure() throws IOException {
  final int numLocations=3;
  List locations=createStorageLocations(numLocations);
  String bpid=nsInfo.getBlockPoolID();
  // First startup: initialize all locations, then release the directory
  // locks so a second DataStorage instance can take them over.
  storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
  storage.unlockAll();
  storage=new DataStorage();
  // Change the cluster ID so doTransition() sees an incompatible cluster.
  nsInfo.clusterID="cluster1";
  try {
    storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
    fail("Expect to throw an exception from doTransition()");
  } catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Incompatible clusterIDs",e);
  }
  // All locations were visited before the failure, so all stay registered.
  assertEquals(numLocations,storage.getNumStorageDirs());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises DataStorage#addStorageLocations: adding directories for several
 * namespaces, re-adding already-active directories (which must fail), and
 * adding a larger set of directories.
 *
 * @throws IOException on storage errors
 * @throws URISyntaxException if a storage location URI is malformed
 */
@Test
public void testAddStorageDirectories() throws IOException, URISyntaxException {
  final int numLocations=3;
  final int numNamespace=3;
  List locations=createStorageLocations(numLocations);
  List namespaceInfos=createNamespaceInfos(numNamespace);
  // Add the same locations once per namespace; each pass must create the
  // per-block-pool subdirectory under every location.
  for ( NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN,ni,locations,START_OPT);
    for ( StorageLocation sl : locations) {
      checkDir(sl.getFile());
      checkDir(sl.getFile(),ni.getBlockPoolID());
    }
  }
  // Directories are counted once, regardless of how many namespaces use them.
  assertEquals(numLocations,storage.getNumStorageDirs());
  // Re-adding directories that are already active must be rejected and must
  // not change the registered count.
  locations=createStorageLocations(numLocations);
  try {
    storage.addStorageLocations(mockDN,namespaceInfos.get(0),locations,START_OPT);
    fail("Expected to throw IOException: adding active directories.");
  } catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
  }
  assertEquals(numLocations,storage.getNumStorageDirs());
  // Adding a set of 6 locations succeeds and the count reflects 6 directories.
  locations=createStorageLocations(6);
  storage.addStorageLocations(mockDN,nsInfo,locations,START_OPT);
  assertEquals(6,storage.getNumStorageDirs());
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDatanodeRegister

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A DN must still be able to fetch namespace info from an NN even when the
 * NN reports a layout version different from the DN's own.
 */
@Test
public void testDifferentLayoutVersions() throws Exception {
  // Sanity check: with the default mock, the reported layout version matches.
  int reportedLv = actor.retrieveNamespaceInfo().getLayoutVersion();
  assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION, reportedLv);
  // Now make the NN report a wildly different layout version.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000)
      .when(fakeNsInfo).getLayoutVersion();
  try {
    actor.retrieveNamespaceInfo();
  } catch (IOException e) {
    fail("Should not fail to retrieve NS info from DN with different layout version");
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies how the DN handshake treats NN software versions: an NN version
 * above the DN's configured minimum is accepted, but one below it is
 * rejected with an IncorrectVersionException.
 */
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // Baseline: with matching versions the handshake succeeds.
  String reported = actor.retrieveNamespaceInfo().getSoftwareVersion();
  assertEquals(VersionInfo.getVersion(), reported);
  // NN newer than the DN's minimum: accepted, version passed through.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  // NN older than the DN's configured minimum: must be rejected.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDeleteBlockPool

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises "dfsadmin -deleteBlockPool": deleting a block pool on a DN must
 * be refused without the "force" argument while block files remain, and must
 * succeed (removing only that pool's directories) with it.
 */
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    // Two federated nameservices sharing one DN; one file in each namespace.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2");
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs1=cluster.getFileSystem(0);
    FileSystem fs2=cluster.getFileSystem(1);
    DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)1,54);
    DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)1,54);
    DataNode dn1=cluster.getDataNodes().get(0);
    String bpid1=cluster.getNamesystem(0).getBlockPoolId();
    String bpid2=cluster.getNamesystem(1).getBlockPoolId();
    File dn1StorageDir1=cluster.getInstanceStorageDir(0,0);
    File dn1StorageDir2=cluster.getInstanceStorageDir(0,1);
    // Drop the second nameservice from the DN's view so bpid2 becomes a
    // candidate for deletion on this DN.
    Configuration nn1Conf=cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1,dn1.getAllBpOs().length);
    DFSAdmin admin=new DFSAdmin(nn1Conf);
    String dn1Address=dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
    String[] args={"-deleteBlockPool",dn1Address,bpid2};
    // Without "force" the command must fail and leave the directories alone.
    int ret=admin.run(args);
    assertFalse(0 == ret);
    verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2);
    verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2);
    // With "force" the pool's directories are removed...
    String[] forceArgs={"-deleteBlockPool",dn1Address,bpid2,"force"};
    ret=admin.run(forceArgs);
    assertEquals(0,ret);
    verifyBlockPoolDirectories(false,dn1StorageDir1,bpid2);
    verifyBlockPoolDirectories(false,dn1StorageDir2,bpid2);
    // ...while the other pool's directories remain untouched.
    verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1);
    verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises DataNode#deleteBlockPool directly: a running block pool cannot
 * be deleted; a stopped pool with remaining block files can only be deleted
 * with force=true (or after its blocks are gone); deletion must not disturb
 * the other pool.
 */
@Test
public void testDeleteBlockPool() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    // Two federated nameservices, two DNs, one file in each namespace.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2");
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs1=cluster.getFileSystem(0);
    FileSystem fs2=cluster.getFileSystem(1);
    DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)2,54);
    DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)2,54);
    DataNode dn1=cluster.getDataNodes().get(0);
    DataNode dn2=cluster.getDataNodes().get(1);
    String bpid1=cluster.getNamesystem(0).getBlockPoolId();
    String bpid2=cluster.getNamesystem(1).getBlockPoolId();
    File dn1StorageDir1=cluster.getInstanceStorageDir(0,0);
    File dn1StorageDir2=cluster.getInstanceStorageDir(0,1);
    File dn2StorageDir1=cluster.getInstanceStorageDir(1,0);
    File dn2StorageDir2=cluster.getInstanceStorageDir(1,1);
    // A pool that is still being served must not be deletable, even forced.
    try {
      dn1.deleteBlockPool(bpid1,true);
      fail("Must not delete a running block pool");
    } catch ( IOException expected) {
    }
    // Reconfigure DN1 to serve only the second nameservice, stopping bpid1.
    Configuration nn1Conf=cluster.getConfiguration(1);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId2");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1,dn1.getAllBpOs().length);
    // Stopped pool, but block files still present: non-forced delete fails.
    try {
      dn1.deleteBlockPool(bpid1,false);
      fail("Must not delete if any block files exist unless " + "force is true");
    } catch ( IOException expected) {
    }
    verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1);
    verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1);
    // Forced delete removes bpid1's directories on DN1.
    dn1.deleteBlockPool(bpid1,true);
    verifyBlockPoolDirectories(false,dn1StorageDir1,bpid1);
    verifyBlockPoolDirectories(false,dn1StorageDir2,bpid1);
    // Remove the file so DN2's finalized dirs for bpid1 drain of block files.
    fs1.delete(new Path("/alpha"),true);
    File finalDir1=MiniDFSCluster.getFinalizedDir(dn2StorageDir1,bpid1);
    File finalDir2=MiniDFSCluster.getFinalizedDir(dn2StorageDir1,bpid2);
    // Poll until the deletions have reached the DN's on-disk state.
    // NOTE(review): this loop has no upper bound; it relies on the test
    // runner's timeout if deletion never completes.
    while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) || (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
      try {
        Thread.sleep(3000);
      } catch ( Exception ignored) {
      }
    }
    // Stop NN1; DN2 still serves bpid1, so deletion must still be refused.
    cluster.shutdownNameNode(0);
    try {
      dn2.deleteBlockPool(bpid1,true);
      fail("Must not delete a running block pool");
    } catch ( IOException expected) {
    }
    // After dropping bpid1 from DN2, a non-forced delete succeeds because
    // the pool's block files are already gone.
    dn2.refreshNamenodes(nn1Conf);
    assertEquals(1,dn2.getAllBpOs().length);
    verifyBlockPoolDirectories(true,dn2StorageDir1,bpid1);
    verifyBlockPoolDirectories(true,dn2StorageDir2,bpid1);
    dn2.deleteBlockPool(bpid1,false);
    verifyBlockPoolDirectories(false,dn2StorageDir1,bpid1);
    verifyBlockPoolDirectories(false,dn2StorageDir2,bpid1);
    // The second pool is untouched on both DNs.
    verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2);
    verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2);
    verifyBlockPoolDirectories(true,dn2StorageDir1,bpid2);
    verifyBlockPoolDirectories(true,dn2StorageDir2,bpid2);
    // The surviving namespace must still be fully writable/replicable.
    Path gammaFile=new Path("/gamma");
    DFSTestUtil.createFile(fs2,gammaFile,1024,(short)1,55);
    fs2.setReplication(gammaFile,(short)2);
    DFSTestUtil.waitReplication(fs2,gammaFile,(short)2);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestFsDatasetCache

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that an uncache command issued while blocks are still being
 * cached (mlock is artificially slowed down) still drives cache usage
 * back to zero.
 */
@Test(timeout=600000)
public void testUncachingBlocksBeforeCachingFinishes() throws Exception {
  LOG.info("beginning testUncachingBlocksBeforeCachingFinishes");
  final int NUM_BLOCKS=5;
  // Cache must start empty.
  DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd);
  final Path testFile=new Path("/testCacheBlock");
  final long testFileLen=BLOCK_SIZE * NUM_BLOCKS;
  // NOTE(review): the lowercase 'l' long suffix in 0xABBAl is easy to
  // misread as the digit 1; prefer an uppercase 'L'.
  DFSTestUtil.createFile(fs,testFile,testFileLen,(short)1,0xABBAl);
  HdfsBlockLocation[] locs=(HdfsBlockLocation[])fs.getFileBlockLocations(testFile,0,testFileLen);
  assertEquals("Unexpected number of blocks",NUM_BLOCKS,locs.length);
  final long[] blockSizes=getBlockSizes(locs);
  final long cacheCapacity=fsd.getCacheCapacity();
  long cacheUsed=fsd.getCacheUsed();
  long current=0;
  assertEquals("Unexpected cache capacity",CACHE_CAPACITY,cacheCapacity);
  assertEquals("Unexpected amount of cache used",current,cacheUsed);
  // Slow every mlock down by 3s so uncaching overlaps in-flight caching.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator(){
    @Override
    public void mlock( String identifier, ByteBuffer mmap, long length) throws IOException {
      LOG.info("An mlock operation is starting on " + identifier);
      try {
        Thread.sleep(3000);
      } catch ( InterruptedException e) {
        Assert.fail();
      }
    }
  }
  );
  // Cache the blocks one at a time, tracking expected usage as we go.
  for (int i=0; i < NUM_BLOCKS; i++) {
    setHeartbeatResponse(cacheBlock(locs[i]));
    current=DFSTestUtil.verifyExpectedCacheUsage(current + blockSizes[i],i + 1,fsd);
  }
  // Uncache everything while caching may still be in progress; usage must
  // nevertheless return to zero.
  setHeartbeatResponse(new DatanodeCommand[]{getResponse(locs,DatanodeProtocol.DNA_UNCACHE)});
  current=DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd);
  LOG.info("finishing testUncachingBlocksBeforeCachingFinishes");
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Fills the cache with one big file, verifies a later directive for a small
 * file cannot be satisfied while the cache is full, then checks the small
 * file's directive succeeds once the big file's directive is removed.
 */
@Test(timeout=60000)
public void testReCacheAfterUncache() throws Exception {
  final int TOTAL_BLOCKS_PER_CACHE=Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  // The capacity must be an exact multiple of the block size for the
  // "full cache" arithmetic below to hold.
  Assert.assertEquals(0,CACHE_CAPACITY % BLOCK_SIZE);
  // One small one-block file, and one big file that exactly fills the cache.
  final Path SMALL_FILE=new Path("/smallFile");
  DFSTestUtil.createFile(fs,SMALL_FILE,BLOCK_SIZE,(short)1,0xcafe);
  final Path BIG_FILE=new Path("/bigFile");
  DFSTestUtil.createFile(fs,BIG_FILE,TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE,(short)1,0xbeef);
  final DistributedFileSystem dfs=cluster.getFileSystem();
  dfs.addCachePool(new CachePoolInfo("pool"));
  final long bigCacheDirectiveId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(BIG_FILE).setReplication((short)1).build());
  // Wait until the DN reports that the whole big file has been cached.
  GenericTestUtils.waitFor(new Supplier(){
    @Override
    public Boolean get(){
      MetricsRecordBuilder dnMetrics=getMetrics(dn.getMetrics().name());
      long blocksCached=MetricsAsserts.getLongCounter("BlocksCached",dnMetrics);
      if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
        LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to " + "be cached. Right now only " + blocksCached + " blocks are cached.");
        return false;
      }
      LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
      return true;
    }
  }
  ,1000,30000);
  // The cache is now full; a directive for the small file cannot make
  // progress.  Sleep and confirm the cached-block count did not change.
  final long shortCacheDirectiveId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(SMALL_FILE).setReplication((short)1).build());
  Thread.sleep(10000);
  MetricsRecordBuilder dnMetrics=getMetrics(dn.getMetrics().name());
  Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE,MetricsAsserts.getLongCounter("BlocksCached",dnMetrics));
  // Free the cache by removing the big directive; the small file's
  // directive should then be satisfied.
  dfs.removeCacheDirective(bigCacheDirectiveId);
  GenericTestUtils.waitFor(new Supplier(){
    @Override
    public Boolean get(){
      RemoteIterator iter;
      try {
        iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
        CacheDirectiveEntry entry;
        // Scan the directive listing for the small file's directive.
        do {
          entry=iter.next();
        } while (entry.getInfo().getId() != shortCacheDirectiveId);
        if (entry.getStats().getFilesCached() != 1) {
          LOG.info("waiting for directive " + shortCacheDirectiveId + " to be cached. stats = " + entry.getStats());
          return false;
        }
        LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
      } catch ( IOException e) {
        Assert.fail("unexpected exception" + e.toString());
      }
      return true;
    }
  }
  ,1000,30000);
  dfs.removeCacheDirective(shortCacheDirectiveId);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestReadOnlySharedStorage

APIUtilityVerifier TestInitializer InternalCallVerifier ConditionMatcher HybridVerifier 
/**
 * Setup a {@link MiniDFSCluster}.
 * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
 */
@Before
public void setup() throws IOException, InterruptedException {
  conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  // Per-DN configuration overlays: only the RO node gets the
  // READ_ONLY_SHARED storage state.  (The original nested a
  // `i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL` ternary inside an
  // `if (i == RO_NODE_INDEX)` guard, so the NORMAL branch was dead code.)
  Configuration[] overlays = new Configuration[NUM_DATANODES];
  for (int i = 0; i < overlays.length; i++) {
    overlays[i] = new Configuration();
    if (i == RO_NODE_INDEX) {
      overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, READ_ONLY_SHARED);
    }
  }
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES)
      .dataNodeConfOverlays(overlays)
      .build();
  fs = cluster.getFileSystem();
  blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
  datanodeManager = blockManager.getDatanodeManager();
  client = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  // Confirm each DN reports the expected storage state.
  for (int i = 0; i < NUM_DATANODES; i++) {
    DataNode dataNode = cluster.getDataNodes().get(i);
    validateStorageState(
        BlockManagerTestUtil.getStorageReportsForDatanode(
            datanodeManager.getDatanode(dataNode.getDatanodeId())),
        i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL);
  }
  // Create a single-block file with one (NORMAL) replica.
  DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE, (short) 1, seed);
  LocatedBlock locatedBlock = getLocatedBlock();
  extendedBlock = locatedBlock.getBlock();
  block = extendedBlock.getLocalBlock();
  assertThat(locatedBlock.getLocations().length, is(1));
  normalDataNode = locatedBlock.getLocations()[0];
  readOnlyDataNode = datanodeManager.getDatanode(
      cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
  assertThat(normalDataNode, is(not(readOnlyDataNode)));
  validateNumberReplicas(1);
  // Inject the block into the RO node so it gains a READ_ONLY_SHARED
  // replica, then wait until the NN reports both locations.
  cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
  waitForLocations(2);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestRefreshNamenodes

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a DN picks up NameNodes added to a federated cluster, and
 * that the set of NN addresses known to the DN exactly matches the set of
 * NNs in the cluster.
 */
@Test
public void testRefreshNamenodes() throws IOException {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new NSConf("ns1").addNN(new NNConf(null).setIpcPort(nnPort1))).setFederation(true);
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).build();
    DataNode dn=cluster.getDataNodes().get(0);
    assertEquals(1,dn.getAllBpOs().length);
    // Each added NN should yield one more BPOfferService on the DN.
    cluster.addNameNode(conf,nnPort2);
    assertEquals(2,dn.getAllBpOs().length);
    cluster.addNameNode(conf,nnPort3);
    assertEquals(3,dn.getAllBpOs().length);
    cluster.addNameNode(conf,nnPort4);
    // Collect NN addresses as the cluster knows them; add() returning true
    // also asserts the addresses are all distinct.
    Set nnAddrsFromCluster=Sets.newHashSet();
    for (int i=0; i < 4; i++) {
      assertTrue(nnAddrsFromCluster.add(cluster.getNameNode(i).getNameNodeAddress()));
    }
    // Collect NN addresses as the DN's service actors know them.
    Set nnAddrsFromDN=Sets.newHashSet();
    for ( BPOfferService bpos : dn.getAllBpOs()) {
      for ( BPServiceActor bpsa : bpos.getBPServiceActors()) {
        assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
      }
    }
    // The symmetric difference must be empty, i.e. the two sets are equal.
    assertEquals("",Joiner.on(",").join(Sets.symmetricDifference(nnAddrsFromCluster,nnAddrsFromDN)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestSimulatedFSDataset

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FsDatasetSpi.Factory must resolve to the real FsDatasetFactory by default,
 * and to the simulated factory once SimulatedFSDataset is installed in the
 * configuration.
 */
@Test
public void testFSDatasetFactory(){
  final Configuration conf = new Configuration();
  // Default configuration: a real (non-simulated) dataset factory.
  FsDatasetSpi.Factory defaultFactory = FsDatasetSpi.Factory.getFactory(conf);
  assertEquals(FsDatasetFactory.class, defaultFactory.getClass());
  assertFalse(defaultFactory.isSimulated());
  // After installing the simulated dataset, the factory must change.
  SimulatedFSDataset.setFactory(conf);
  FsDatasetSpi.Factory simulatedFactory = FsDatasetSpi.Factory.getFactory(conf);
  assertEquals(SimulatedFSDataset.Factory.class, simulatedFactory.getClass());
  assertTrue(simulatedFactory.isSimulated());
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After adding blocks to the simulated dataset, every block must be valid
 * and report the expected per-block length and data.
 */
@Test
public void testWriteRead() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  addSomeBlocks(fsdataset);
  // Block ids are assigned 1..NUMBLOCKS by addSomeBlocks.
  for (int blockId = 1; blockId <= NUMBLOCKS; blockId++) {
    ExtendedBlock block = new ExtendedBlock(bpid, blockId, 0, 0);
    assertTrue(fsdataset.isValidBlock(block));
    assertEquals(blockIdToLen(blockId), fsdataset.getLength(block));
    checkBlockDataAndSize(fsdataset, block, blockIdToLen(blockId));
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Invalidating blocks must remove them from the dataset and return the
 * freed space to the reported capacity, leaving all other blocks intact.
 */
@Test
public void testInvalidate() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  int bytesAdded = addSomeBlocks(fsdataset);
  // Invalidate the first two blocks.
  Block[] doomed = new Block[2];
  doomed[0] = new Block(1, 0, 0);
  doomed[1] = new Block(2, 0, 0);
  fsdataset.invalidate(bpid, doomed);
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[0]));
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[1]));
  // The freed bytes must be reflected in the used/remaining accounting.
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted,
      fsdataset.getRemaining());
  // Every remaining block is still valid.
  for (int blockId = 3; blockId <= NUMBLOCKS; blockId++) {
    Block survivor = new Block(blockId, 0, 0);
    assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, survivor)));
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The block report must be empty for a fresh dataset and, after blocks are
 * added, must list every block with its expected length.
 */
@Test
public void testGetBlockReport() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  // A brand-new dataset reports no blocks.
  BlockListAsLongs report = fsdataset.getBlockReport(bpid);
  assertEquals(0, report.getNumberOfBlocks());
  // After adding blocks, all of them must show up with correct lengths.
  addSomeBlocks(fsdataset);
  report = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block reported : report) {
    assertNotNull(reported);
    assertEquals(blockIdToLen(reported.getBlockId()), reported.getNumBytes());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getMetaDataInputStream() must fail for an unknown block and, for an
 * existing block, must return a parseable metadata header with a NULL
 * checksum (SimulatedFSDataset stores no real checksums).
 */
@Test
public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
  try {
    assertTrue(fsdataset.getMetaDataInputStream(b) == null);
    // fail() is clearer than the original assertTrue("...", false).
    fail("Expected an IO exception");
  } catch (IOException e) {
    // expected: the block does not exist yet
  }
  addSomeBlocks(fsdataset);
  b = new ExtendedBlock(bpid, 1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  try {
    short version = metaDataInput.readShort();
    assertEquals(BlockMetadataHeader.VERSION, version);
    DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
    assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
    assertEquals(0, checksum.getChecksumSize());
  } finally {
    // Don't leak the metadata stream (closes the wrapped stream too).
    metaDataInput.close();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Injects one dataset's block report into a second, non-empty dataset and
 * verifies the combined report, lengths, and capacity accounting; also
 * checks that injection beyond the configured capacity fails.
 */
@Test
public void testInjectionNonEmpty() throws IOException {
  SimulatedFSDataset fsdataset=getSimulatedFSDataset();
  BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid);
  assertEquals(0,blockReport.getNumberOfBlocks());
  int bytesAdded=addSomeBlocks(fsdataset);
  blockReport=fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
  for ( Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes());
  }
  // Build a second dataset pre-populated with a different range of blocks.
  fsdataset=null;
  SimulatedFSDataset sfsdataset=getSimulatedFSDataset();
  bytesAdded+=addSomeBlocks(sfsdataset,NUMBLOCKS + 1);
  // NOTE(review): the next two getBlockReport() results are discarded and
  // the assertions re-check the FIRST dataset's (stale) report; verify
  // whether the returned reports were meant to be asserted instead.
  sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
  sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks());
  // Inject the first dataset's blocks; the combined report must double.
  sfsdataset.injectBlocks(bpid,blockReport);
  blockReport=sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS * 2,blockReport.getNumberOfBlocks());
  for ( Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()),sfsdataset.getLength(new ExtendedBlock(bpid,b)));
  }
  // Used/remaining space must account for all injected bytes.
  assertEquals(bytesAdded,sfsdataset.getDfsUsed());
  assertEquals(sfsdataset.getCapacity() - bytesAdded,sfsdataset.getRemaining());
  // Injection into a dataset with too little capacity must fail.
  conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,10);
  try {
    sfsdataset=getSimulatedFSDataset();
    sfsdataset.addBlockPool(bpid,conf);
    sfsdataset.injectBlocks(bpid,blockReport);
    // NOTE(review): fail("Expected an IO exception") would be clearer.
    assertTrue("Expected an IO exception",false);
  } catch ( IOException e) {
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Injects the block report of a populated SimulatedFSDataset into a fresh
// ("empty") one and verifies the target reproduces the block count, per-block
// lengths (blockIdToLen), DFS-used bytes and remaining capacity.
@Test public void testInjectionEmpty() throws IOException { SimulatedFSDataset fsdataset=getSimulatedFSDataset(); BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid); assertEquals(0,blockReport.getNumberOfBlocks()); int bytesAdded=addSomeBlocks(fsdataset); blockReport=fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); } SimulatedFSDataset sfsdataset=getSimulatedFSDataset(); sfsdataset.injectBlocks(bpid,blockReport); blockReport=sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); assertEquals(blockIdToLen(b.getBlockId()),sfsdataset.getLength(new ExtendedBlock(bpid,b))); } assertEquals(bytesAdded,sfsdataset.getDfsUsed()); assertEquals(sfsdataset.getCapacity() - bytesAdded,sfsdataset.getRemaining()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestStartSecureDataNode

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Starts a secure MiniDFSCluster using Kerberos principals/keytabs passed in via
// system properties and asserts the DataNode comes up. Requires an external KDC
// (see the @Before assumption in this class) and privileged ports 1004/1006.
// NOTE(review): DFS_DATANODE_DATA_DIR_KEY is set to "700", which looks like a
// permission value — presumably DFS_DATANODE_DATA_DIR_PERMISSION_KEY was
// intended; confirm against the upstream test.
// NOTE(review): ex.printStackTrace() before rethrow duplicates the report — the
// rethrown exception already carries the stack trace.
@Test public void testSecureNameNode() throws Exception { MiniDFSCluster cluster=null; try { String nnPrincipal=System.getProperty("dfs.namenode.kerberos.principal"); String nnSpnegoPrincipal=System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); String nnKeyTab=System.getProperty("dfs.namenode.keytab.file"); assertNotNull("NameNode principal was not specified",nnPrincipal); assertNotNull("NameNode SPNEGO principal was not specified",nnSpnegoPrincipal); assertNotNull("NameNode keytab was not specified",nnKeyTab); String dnPrincipal=System.getProperty("dfs.datanode.kerberos.principal"); String dnKeyTab=System.getProperty("dfs.datanode.keytab.file"); assertNotNull("DataNode principal was not specified",dnPrincipal); assertNotNull("DataNode keytab was not specified",dnKeyTab); Configuration conf=new HdfsConfiguration(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,nnPrincipal); conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,nnSpnegoPrincipal); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,nnKeyTab); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,dnPrincipal); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,dnKeyTab); conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,"127.0.0.1:1004"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,"127.0.0.1:1006"); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,"700"); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).checkDataNodeAddrConfig(true).build(); cluster.waitActive(); assertTrue(cluster.isDataNodeUp()); } catch ( Exception ex) { ex.printStackTrace(); throw ex; } finally { if (cluster != null) { cluster.shutdown(); } } }

TestInitializer AssumptionSetter HybridVerifier 
// Runs before each test: skips the whole test (via JUnit Assume) unless an
// external KDC is available. Named "test*" but it is a @Before guard, not a test.
@Before public void testExternalKdcRunning(){ Assume.assumeTrue(isExternalKdcRunning()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestStorageReport

InternalCallVerifier IdentityVerifier ConditionMatcher HybridVerifier 
// Triggers a DataNode heartbeat and captures the StorageReport[] sent to the
// NameNode through a Mockito spy on the BPOS-to-NN translator, asserting every
// report carries the parameterized storage type and NORMAL state.
/** * Ensure that storage type and storage state are propagated * in Storage Reports. */ @Test public void testStorageReportHasStorageTypeAndState() throws IOException { assertNotSame(storageType,StorageType.DEFAULT); NameNode nn=cluster.getNameNode(); DataNode dn=cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(dn,nn); DataNodeTestUtils.triggerHeartbeat(dn); ArgumentCaptor captor=ArgumentCaptor.forClass(StorageReport[].class); Mockito.verify(nnSpy).sendHeartbeat(any(DatanodeRegistration.class),captor.capture(),anyLong(),anyLong(),anyInt(),anyInt(),anyInt()); StorageReport[] reports=captor.getValue(); for ( StorageReport report : reports) { assertThat(report.getStorage().getStorageType(),is(storageType)); assertThat(report.getStorage().getState(),is(DatanodeStorage.State.NORMAL)); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestTransferRbw

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes an unfinished (replica-being-written) block, starts an additional
// DataNode, transfers the RBW replica to it via DFSTestUtil.transferRbw, and
// asserts the transferred replica matches the original's block id, generation
// stamp and visible length.
@Test public void testTransferRbw() throws Exception { final HdfsConfiguration conf=new HdfsConfiguration(); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build(); try { cluster.waitActive(); final DistributedFileSystem fs=cluster.getFileSystem(); final Path p=new Path("/foo"); final int size=(1 << 16) + RAN.nextInt(1 << 16); LOG.info("size = " + size); final FSDataOutputStream out=fs.create(p,REPLICATION); final byte[] bytes=new byte[1024]; for (int remaining=size; remaining > 0; ) { RAN.nextBytes(bytes); final int len=bytes.length < remaining ? bytes.length : remaining; out.write(bytes,0,len); out.hflush(); remaining-=len; } final ReplicaBeingWritten oldrbw; final DataNode newnode; final DatanodeInfo newnodeinfo; final String bpid=cluster.getNamesystem().getBlockPoolId(); { final DataNode oldnode=cluster.getDataNodes().get(0); oldrbw=getRbw(oldnode,bpid); LOG.info("oldrbw = " + oldrbw); cluster.startDataNodes(conf,1,true,null,null); newnode=cluster.getDataNodes().get(REPLICATION); final DatanodeInfo oldnodeinfo; { final DatanodeInfo[] datatnodeinfos=cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE); Assert.assertEquals(2,datatnodeinfos.length); int i=0; for (DatanodeRegistration dnReg=newnode.getDNRegistrationForBP(bpid); i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++) ; Assert.assertTrue(i < datatnodeinfos.length); newnodeinfo=datatnodeinfos[i]; oldnodeinfo=datatnodeinfos[1 - i]; } final ExtendedBlock b=new ExtendedBlock(bpid,oldrbw.getBlockId(),oldrbw.getBytesAcked(),oldrbw.getGenerationStamp()); final BlockOpResponseProto s=DFSTestUtil.transferRbw(b,DFSClientAdapter.getDFSClient(fs),oldnodeinfo,newnodeinfo); Assert.assertEquals(Status.SUCCESS,s.getStatus()); } final ReplicaBeingWritten newrbw=getRbw(newnode,bpid); LOG.info("newrbw = " + newrbw); Assert.assertEquals(oldrbw.getBlockId(),newrbw.getBlockId()); 
Assert.assertEquals(oldrbw.getGenerationStamp(),newrbw.getGenerationStamp()); Assert.assertEquals(oldrbw.getVisibleLength(),newrbw.getVisibleLength()); LOG.info("DONE"); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestFsDatasetImpl

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
// Per-test fixture: builds an FsDatasetImpl over mocked DataNode/DataStorage
// with NUM_INIT_VOLUMES simulated volumes, then sanity-checks the volume count
// and that no volume has failed.
@Before public void setUp() throws IOException { final DataNode datanode=Mockito.mock(DataNode.class); storage=Mockito.mock(DataStorage.class); Configuration conf=new Configuration(); final DNConf dnConf=new DNConf(conf); when(datanode.getConf()).thenReturn(conf); when(datanode.getDnConf()).thenReturn(dnConf); createStorageDirs(storage,conf,NUM_INIT_VOLUMES); dataset=new FsDatasetImpl(datanode,storage,conf); assertEquals(NUM_INIT_VOLUMES,dataset.getVolumes().size()); assertEquals(0,dataset.getNumFailedVolumes()); }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}:
 * starts replica recovery on a live cluster, verifies the replica enters the RUR
 * state, that an update with a mismatched block length is rejected, and that a
 * well-formed update returns a non-null storage id.
 */
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    // Create a file and locate a DataNode holding its last block.
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertNotNull(datanode); // was assertTrue(datanode != null)

    // Initialize recovery with a bumped generation stamp and a shortened length.
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    // The replica must now be in the "replica under recovery" (RUR) state.
    final ReplicaInfo replica =
        FsDatasetTestUtil.fetchReplicaInfo(fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    FsDatasetImpl.checkReplicaFiles(replica);

    // An update whose length disagrees with the recovery info must be rejected.
    {
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail("updateReplicaUnderRecovery should reject a mismatched length");
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    // A consistent update succeeds and reports the storage holding the replica.
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
    Assert.assertNotNull(storageID); // was assertTrue(storageID != null)
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}
 * against an in-memory replica map:
 * 1. Normal recovery marks the replica under-recovery and records the recovery
 *    id; a newer recovery id supersedes it; reusing an older id fails with
 *    RecoveryInProgressException.
 * 2. Recovering a block this node does not have returns null.
 * 3. A recovery id not greater than the replica's generation stamp fails.
 * 4. A block whose generation stamp is newer than the stored replica's fails.
 */
@Test public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  final ReplicaMap map = new ReplicaMap(this);
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }

  {
    // Normal case: recovery id greater than the generation stamp.
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);
    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);
    final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());

    // A second recovery with a newer id supersedes the first.
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());

    // Reusing the older recovery id must now fail.
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }

  {
    // Recovering a block this node does not have returns null.
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
        recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.", r);
  }

  {
    // A recovery id not greater than the generation stamp must fail.
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }

  {
    // A block generation stamp newer than the stored replica's must fail.
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's "
          + "gs is less than the block's gs");
    } catch (IOException e) {
      // Fix: the original computed startsWith(...) and silently discarded the
      // boolean; actually assert that the expected failure message was produced.
      Assert.assertTrue(e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}

UtilityVerifier ExceptionVerifier HybridVerifier 
// Creates an inter-DataNode protocol proxy with a 500ms timeout against a server
// that never responds; the RPC must fail with SocketTimeoutException (checked via
// @Test(expected=...)). Proxy and server are always torn down in finally.
/** * Test to verify that InterDatanode RPC timesout as expected when * the server DN does not respond. */ @Test(expected=SocketTimeoutException.class) public void testInterDNProtocolTimeout() throws Throwable { final Server server=new TestServer(1,true); server.start(); final InetSocketAddress addr=NetUtils.getConnectAddress(server); DatanodeID fakeDnId=DFSTestUtil.getLocalDatanodeID(addr.getPort()); DatanodeInfo dInfo=new DatanodeInfo(fakeDnId); InterDatanodeProtocol proxy=null; try { proxy=DataNode.createInterDataNodeProtocolProxy(dInfo,conf,500,false); proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid",1),null,100)); fail("Expected SocketTimeoutException exception, but did not get."); } finally { if (proxy != null) { RPC.stopProxy(proxy); } server.stop(); } }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestReplicaMap

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
// ReplicasMap.remove: a null block argument is rejected with
// IllegalArgumentException; a block with mismatched generation stamp or block id
// removes nothing (null return); removal succeeds both by Block object and by
// bare block id, and a wrong bare id removes nothing.
@Test public void testRemove(){ try { map.remove(bpid,null); fail("Expected exception not thrown"); } catch ( IllegalArgumentException expected) { } Block b=new Block(block); b.setGenerationStamp(0); assertNull(map.remove(bpid,b)); b.setGenerationStamp(block.getGenerationStamp()); b.setBlockId(0); assertNull(map.remove(bpid,b)); assertNotNull(map.remove(bpid,block)); assertNull(map.remove(bpid,0)); map.add(bpid,new FinalizedReplica(block,null,null)); assertNotNull(map.remove(bpid,block.getBlockId())); }

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
// ReplicasMap.get: a null block argument is rejected with
// IllegalArgumentException; a block with a wrong generation stamp or wrong id
// finds nothing; lookup succeeds both by Block object and by bare block id.
/** * Test for ReplicasMap.get(Block) and ReplicasMap.get(long) tests */ @Test public void testGet(){ try { map.get(bpid,null); fail("Expected exception not thrown"); } catch ( IllegalArgumentException expected) { } assertNotNull(map.get(bpid,block)); Block b=new Block(block); b.setGenerationStamp(0); assertNull(map.get(bpid,b)); b.setGenerationStamp(block.getGenerationStamp()); b.setBlockId(0); assertNull(map.get(bpid,b)); assertNotNull(map.get(bpid,block.getBlockId())); assertNull(map.get(bpid,0)); }

Class: org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// setPermission must not be able to set the ACL bit: applying an FsAclPermission
// wrapping 0755 leaves the inode's stored permission at plain 0755 (both short
// and extended forms) and attaches no AclFeature to the inode.
@Test public void testSetPermissionCannotSetAclBit() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setPermission(path,FsPermission.createImmutable((short)0700)); assertPermission((short)0700); fs.setPermission(path,new FsAclPermission(FsPermission.createImmutable((short)0755))); INode inode=cluster.getNamesystem().getFSDirectory().getNode(path.toUri().getPath(),false); assertNotNull(inode); FsPermission perm=inode.getFsPermission(); assertNotNull(perm); assertEquals(0755,perm.toShort()); assertEquals(0755,perm.toExtendedShort()); assertAclFeature(false); }

Class: org.apache.hadoop.hdfs.server.namenode.FSXAttrBaseTest

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the "value too long" case below asserts the same "total size is
// 17" fragment as the "name too long" case — presumably the configured limits
// make both totals 17; confirm against the test's MAX_SIZE configuration.
/** * Tests for setting xattr * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag. * 2. Set xattr with illegal name. * 3. Set xattr without XAttrSetFlag. * 4. Set xattr and total number exceeds max limit. * 5. Set xattr and name is too long. * 6. Set xattr and value is too long. */ @Test(timeout=120000) public void testSetXAttr() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Map xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(name1)); fs.removeXAttr(path,name1); try { fs.setXAttr(path,null,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with null name should fail."); } catch ( NullPointerException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e); } catch ( RemoteException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e); } try { fs.setXAttr(path,"user.",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with empty name should fail."); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e); } catch ( HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e); } try { fs.setXAttr(path,"a1",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with invalid name prefix or without " + "name prefix should fail."); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e); } catch ( HadoopIllegalArgumentException e) { 
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e); } fs.setXAttr(path,name1,value1); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(name1)); fs.removeXAttr(path,name1); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(newValue1,xattrs.get(name1)); fs.removeXAttr(path,name1); fs.setXAttr(path,name1,value1); fs.setXAttr(path,name2,value2); fs.setXAttr(path,name3,null); try { fs.setXAttr(path,name4,null); Assert.fail("Setting xattr should fail if total number of xattrs " + "for inode exceeds max limit."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Cannot add additional XAttr",e); } fs.removeXAttr(path,name1); fs.removeXAttr(path,name2); fs.removeXAttr(path,name3); String longName="user.0123456789abcdefX"; try { fs.setXAttr(path,longName,null); Assert.fail("Setting xattr should fail if name is too long."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big",e); GenericTestUtils.assertExceptionContains("total size is 17",e); } byte[] longValue=new byte[MAX_SIZE]; try { fs.setXAttr(path,"user.a",longValue); Assert.fail("Setting xattr should fail if value is too long."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big",e); GenericTestUtils.assertExceptionContains("total size is 17",e); } String name="user.111"; byte[] value=new byte[MAX_SIZE - 3]; fs.setXAttr(path,name,value); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Exercises XAttrSetFlag.REPLACE semantics, including that replaced values
// persist across a NameNode restart both without (restart(false)) and with
// (restart(true)) a saved checkpoint; a null replacement value is stored as an
// empty byte array.
/** * Tests for replacing xattr * 1. Replace an xattr using XAttrSetFlag.REPLACE. * 2. Replace an xattr which doesn't exist and expect an exception. * 3. Create multiple xattrs and replace some. * 4. Restart NN and save checkpoint scenarios. */ @Test(timeout=120000) public void testReplaceXAttr() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name1,newValue1,EnumSet.of(XAttrSetFlag.REPLACE)); Map xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(newValue1,xattrs.get(name1)); fs.removeXAttr(path,name1); try { fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.REPLACE)); Assert.fail("Replacing xattr which does not exist should fail."); } catch ( IOException e) { } fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,null,EnumSet.of(XAttrSetFlag.REPLACE)); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); restart(false); initFileSystem(); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); restart(true); initFileSystem(); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); fs.removeXAttr(path,name1); fs.removeXAttr(path,name2); }

UtilityVerifier EqualityVerifier HybridVerifier 
// Walks a matrix of parent/child directory permission combinations with an
// unprivileged user; only the final combination (execute on the parent, write
// access on the child) permits removeXAttr. The double removeXAttr(name2) near
// the top is intentional: the second call targets a now-missing attribute.
/** * removexattr tests. Test that removexattr throws an exception if any of * the following are true: * an xattr that was requested doesn't exist * the caller specifies an unknown namespace * the caller doesn't have access to the namespace * the caller doesn't have permission to get the value of the xattr * the caller does not have "execute" (scan) access to the parent directory * the caller has only read access to the owning directory * the caller has only execute access to the owning directory and execute * access to the actual entity * the caller does not have execute access to the owning directory and write * access to the actual entity */ @Test(timeout=120000) public void testRemoveXAttrPermissions() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name3,null,EnumSet.of(XAttrSetFlag.CREATE)); try { fs.removeXAttr(path,name2); fs.removeXAttr(path,name2); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("No matching attributes found",e); } final String expectedExceptionString="An XAttr name must be prefixed " + "with user/trusted/security/system/raw, followed by a '.'"; try { fs.removeXAttr(path,"wackynamespace.foo"); Assert.fail("expected IOException"); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains(expectedExceptionString,e); } catch ( HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains(expectedExceptionString,e); } final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); fs.setXAttr(path,"trusted.foo","1234".getBytes()); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() 
throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(path,"trusted.foo"); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("User doesn't have permission",e); } finally { fs.removeXAttr(path,"trusted.foo"); } fs.setPermission(path,new FsPermission((short)0700)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(path,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } final Path childDir=new Path(path,"child" + pathCount); FileSystem.mkdirs(fs,childDir,FsPermission.createImmutable((short)0700)); fs.setXAttr(childDir,name1,"1234".getBytes()); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0704)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0701)); fs.setPermission(childDir,new FsPermission((short)0701)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { 
GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0701)); fs.setPermission(childDir,new FsPermission((short)0706)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Also verifies created xattrs survive a NameNode restart both without
// (restart(false)) and with (restart(true)) a saved checkpoint, and that a null
// value is stored as an empty byte array.
/** * Tests for creating xattr * 1. Create an xattr using XAttrSetFlag.CREATE. * 2. Create an xattr which already exists and expect an exception. * 3. Create multiple xattrs. * 4. Restart NN and save checkpoint scenarios. */ @Test(timeout=120000) public void testCreateXAttr() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); Map xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(name1)); fs.removeXAttr(path,name1); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),0); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); try { fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); Assert.fail("Creating xattr which already exists should fail."); } catch ( IOException e) { } fs.removeXAttr(path,name1); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,null,EnumSet.of(XAttrSetFlag.CREATE)); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); restart(false); initFileSystem(); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); restart(true); initFileSystem(); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),2); Assert.assertArrayEquals(value1,xattrs.get(name1)); Assert.assertArrayEquals(new byte[0],xattrs.get(name2)); fs.removeXAttr(path,name1); fs.removeXAttr(path,name2); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Raw-namespace xattrs ("raw.*"): the superuser can set/get/replace/list them
// through the reserved raw path (rawPath); they are invisible when listing the
// plain path; an unprivileged user is denied every raw-xattr operation on both
// the plain and raw paths.
@Test(timeout=120000) public void testRawXAttrs() throws Exception { final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); { final byte[] value=fs.getXAttr(rawPath,raw1); Assert.assertArrayEquals(value,value1); } { final Map xattrs=fs.getXAttrs(rawPath); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(raw1)); fs.removeXAttr(rawPath,raw1); } { fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(rawPath,raw1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); final Map xattrs=fs.getXAttrs(rawPath); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(newValue1,xattrs.get(raw1)); fs.removeXAttr(rawPath,raw1); } { fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE)); final List xattrNames=fs.listXAttrs(rawPath); assertTrue(xattrNames.contains(raw1)); assertTrue(xattrNames.contains(raw2)); assertTrue(xattrNames.size() == 2); fs.removeXAttr(rawPath,raw1); fs.removeXAttr(rawPath,raw2); } { fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE)); final List xattrNames=fs.listXAttrs(path); assertTrue(xattrNames.size() == 0); fs.removeXAttr(rawPath,raw1); fs.removeXAttr(rawPath,raw2); } { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); try { userFs.setXAttr(path,raw1,value1); fail("setXAttr should have thrown"); } catch ( AccessControlException e) { } try { userFs.setXAttr(rawPath,raw1,value1); fail("setXAttr should have thrown"); } catch ( AccessControlException e) { } try { userFs.getXAttrs(rawPath); fail("getXAttrs should have thrown"); } 
catch ( AccessControlException e) { } try { userFs.getXAttrs(path); fail("getXAttrs should have thrown"); } catch ( AccessControlException e) { } try { userFs.getXAttr(rawPath,raw1); fail("getXAttr should have thrown"); } catch ( AccessControlException e) { } try { userFs.getXAttr(path,raw1); fail("getXAttr should have thrown"); } catch ( AccessControlException e) { } return null; } } ); } { fs.setXAttr(rawPath,raw1,value1); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); try { userFs.getXAttr(rawPath,raw1); fail("getXAttr should have thrown"); } catch ( AccessControlException e) { } try { userFs.getXAttr(path,raw1); fail("getXAttr should have thrown"); } catch ( AccessControlException e) { } final List xattrNames=userFs.listXAttrs(path); assertTrue(xattrNames.size() == 0); try { userFs.listXAttrs(rawPath); fail("listXAttrs on raw path should have thrown"); } catch ( AccessControlException e) { } return null; } } ); fs.removeXAttr(rawPath,raw1); } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Uses a second user to probe which parent-directory permission bits (read 0704,
// write 0702, execute 0701) gate listXAttrs on a child directory; only execute
// access on the parent permits listing the child's xattr names.
/** * Test the listXAttrs api. * listXAttrs on a path that doesn't exist. * listXAttrs on a path with no XAttrs * Check basic functionality. * Check that read access to parent dir is not enough to get xattr names * Check that write access to the parent dir is not enough to get names * Check that execute/scan access to the parent dir is sufficient to get * xattr names. */ @Test(timeout=120000) public void testListXAttrs() throws Exception { final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); try { fs.listXAttrs(path); fail("expected FileNotFoundException"); } catch ( FileNotFoundException e) { GenericTestUtils.assertExceptionContains("cannot find",e); } FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); final List noXAttrs=fs.listXAttrs(path); assertTrue("XAttrs were found?",noXAttrs.size() == 0); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE)); final List xattrNames=fs.listXAttrs(path); assertTrue(xattrNames.contains(name1)); assertTrue(xattrNames.contains(name2)); assertTrue(xattrNames.size() == 2); fs.setPermission(path,new FsPermission((short)0704)); final Path childDir=new Path(path,"child" + pathCount); FileSystem.mkdirs(fs,childDir,FsPermission.createImmutable((short)0700)); fs.setXAttr(childDir,name1,"1234".getBytes()); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.listXAttrs(childDir); return null; } } ); fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0702)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.listXAttrs(childDir); return null; } } ); fail("expected 
IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0701)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.listXAttrs(childDir); return null; } } ); fs.setXAttr(childDir,"trusted.myxattr","1234".getBytes()); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); assertTrue(userFs.listXAttrs(childDir).size() == 1); return null; } } ); assertTrue(fs.listXAttrs(childDir).size() == 2); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=120000) public void testXAttrAcl() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setOwner(path,BRUCE.getUserName(),null); FileSystem fsAsBruce=createFileSystem(BRUCE); FileSystem fsAsDiana=createFileSystem(DIANA); fsAsBruce.setXAttr(path,name1,value1); Map xattrs; try { xattrs=fsAsDiana.getXAttrs(path); Assert.fail("Diana should not have read access to get xattrs"); } catch ( AccessControlException e) { } fsAsBruce.modifyAclEntries(path,Lists.newArrayList(aclEntry(ACCESS,USER,DIANA.getUserName(),READ))); xattrs=fsAsDiana.getXAttrs(path); Assert.assertArrayEquals(value1,xattrs.get(name1)); try { fsAsDiana.removeXAttr(path,name1); Assert.fail("Diana should not have write access to remove xattrs"); } catch ( AccessControlException e) { } try { fsAsDiana.setXAttr(path,name2,value2); Assert.fail("Diana should not have write access to set xattrs"); } catch ( AccessControlException e) { } fsAsBruce.modifyAclEntries(path,Lists.newArrayList(aclEntry(ACCESS,USER,DIANA.getUserName(),ALL))); fsAsDiana.setXAttr(path,name2,value2); Assert.assertArrayEquals(value2,fsAsDiana.getXAttrs(path).get(name2)); fsAsDiana.removeXAttr(path,name1); fsAsDiana.removeXAttr(path,name2); }

Class: org.apache.hadoop.hdfs.server.namenode.TestAddBlockRetry

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception { final String src="/testAddBlockRetryShouldReturnBlockWithLocations"; NamenodeProtocols nameNodeRpc=cluster.getNameNodeRpc(); nameNodeRpc.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null); LOG.info("Starting first addBlock for " + src); LocatedBlock lb1=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null); assertTrue("Block locations should be present",lb1.getLocations().length > 0); cluster.restartNameNode(); nameNodeRpc=cluster.getNameNodeRpc(); LocatedBlock lb2=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null); assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock()); assertTrue("Wrong locations with retry",lb2.getLocations().length > 0); }

Class: org.apache.hadoop.hdfs.server.namenode.TestAllowFormat

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * start MiniDFScluster, try formatting with different settings * @throws IOException * @throws InterruptedException */ @Test public void testAllowFormat() throws IOException { LOG.info("--starting mini cluster"); NameNode nn; config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true); cluster=new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build(); cluster.waitActive(); assertNotNull(cluster); nn=cluster.getNameNode(); assertNotNull(nn); LOG.info("Mini cluster created OK"); LOG.info("Verifying format will fail with allowformat false"); config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,false); try { cluster.shutdown(); NameNode.format(config); fail("Format succeeded, when it should have failed"); } catch ( IOException e) { assertTrue("Exception was not about formatting Namenode",e.getMessage().startsWith("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY)); LOG.info("Expected failure: " + StringUtils.stringifyException(e)); LOG.info("Done verifying format will fail with allowformat false"); } LOG.info("Verifying format will succeed with allowformat true"); config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true); NameNode.format(config); LOG.info("Done verifying format will succeed with allowformat true"); }

Class: org.apache.hadoop.hdfs.server.namenode.TestAuditLogger

BooleanVerifier EqualityVerifier HybridVerifier 
/** * Minor test related to HADOOP-9155. Verify that during a * FileSystem.setPermission() operation, the stat passed in during the * logAuditEvent() call returns the new permission rather than the old * permission. */ @Test public void testAuditLoggerWithSetPermission() throws IOException { Configuration conf=new HdfsConfiguration(); conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName()); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitClusterUp(); assertTrue(DummyAuditLogger.initialized); DummyAuditLogger.resetLogCount(); FileSystem fs=cluster.getFileSystem(); long time=System.currentTimeMillis(); final Path p=new Path("/"); fs.setTimes(p,time,time); fs.setPermission(p,new FsPermission(TEST_PERMISSION)); assertEquals(TEST_PERMISSION,DummyAuditLogger.foundPermission); assertEquals(2,DummyAuditLogger.logCount); } finally { cluster.shutdown(); } }

BooleanVerifier EqualityVerifier HybridVerifier 
/** * Tests that AuditLogger works as expected. */ @Test public void testAuditLogger() throws IOException { Configuration conf=new HdfsConfiguration(); conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName()); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitClusterUp(); assertTrue(DummyAuditLogger.initialized); DummyAuditLogger.resetLogCount(); FileSystem fs=cluster.getFileSystem(); long time=System.currentTimeMillis(); fs.setTimes(new Path("/"),time,time); assertEquals(1,DummyAuditLogger.logCount); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testWebHdfsAuditLogger() throws IOException, URISyntaxException { Configuration conf=new HdfsConfiguration(); conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName()); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); GetOpParam.Op op=GetOpParam.Op.GETFILESTATUS; try { cluster.waitClusterUp(); assertTrue(DummyAuditLogger.initialized); URI uri=new URI("http",NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),"/webhdfs/v1/",op.toQueryString(),null); HttpURLConnection conn=(HttpURLConnection)uri.toURL().openConnection(); conn.setRequestMethod(op.getType().toString()); conn.connect(); assertEquals(200,conn.getResponseCode()); conn.disconnect(); assertEquals(1,DummyAuditLogger.logCount); assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr); conn=(HttpURLConnection)uri.toURL().openConnection(); conn.setRequestMethod(op.getType().toString()); conn.setRequestProperty("X-Forwarded-For","1.1.1.1"); conn.connect(); assertEquals(200,conn.getResponseCode()); conn.disconnect(); assertEquals(2,DummyAuditLogger.logCount); assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr); conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS,"127.0.0.1"); ProxyUsers.refreshSuperUserGroupsConfiguration(conf); conn=(HttpURLConnection)uri.toURL().openConnection(); conn.setRequestMethod(op.getType().toString()); conn.setRequestProperty("X-Forwarded-For","1.1.1.1"); conn.connect(); assertEquals(200,conn.getResponseCode()); conn.disconnect(); assertEquals(3,DummyAuditLogger.logCount); assertEquals("1.1.1.1",DummyAuditLogger.remoteAddr); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestAuditLogs

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
@Before public void setupCluster() throws Exception { configureAuditLogs(); conf=new HdfsConfiguration(); final long precision=1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,useAsyncLog); util=new DFSTestUtil.Builder().setName("TestAuditAllowed").setNumFiles(20).build(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); fs=cluster.getFileSystem(); util.createFiles(fs,fileName); Logger logger=((Log4JLogger)FSNamesystem.auditLog).getLogger(); @SuppressWarnings("unchecked") List appenders=Collections.list(logger.getAllAppenders()); assertEquals(1,appenders.size()); assertEquals(useAsyncLog,appenders.get(0) instanceof AsyncAppender); fnames=util.getFileNames(fileName); util.waitReplication(fs,fileName,(short)3); userGroupInfo=UserGroupInformation.createUserForTesting(username,groups); }

Class: org.apache.hadoop.hdfs.server.namenode.TestBackupNode

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Ensure that the backupnode will tail edits from the NN * and keep in sync, even while the NN rolls, checkpoints * occur, etc. */ @Test public void testBackupNodeTailsEdits() throws Exception { Configuration conf=new HdfsConfiguration(); HAUtil.setAllowStandbyReads(conf,true); MiniDFSCluster cluster=null; FileSystem fileSys=null; BackupNode backup=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); fileSys=cluster.getFileSystem(); backup=startBackupNode(conf,StartupOption.BACKUP,1); BackupImage bnImage=(BackupImage)backup.getFSImage(); testBNInSync(cluster,backup,1); NameNode nn=cluster.getNameNode(); NamenodeProtocols nnRpc=nn.getRpcServer(); nnRpc.rollEditLog(); assertEquals(bnImage.getEditLog().getCurSegmentTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId()); testBNInSync(cluster,backup,2); long nnImageBefore=nn.getFSImage().getStorage().getMostRecentCheckpointTxId(); backup.doCheckpoint(); long nnImageAfter=nn.getFSImage().getStorage().getMostRecentCheckpointTxId(); assertTrue("nn should have received new checkpoint. 
before: " + nnImageBefore + " after: "+ nnImageAfter,nnImageAfter > nnImageBefore); testBNInSync(cluster,backup,3); StorageDirectory sd=bnImage.getStorage().getStorageDir(0); backup.stop(); backup=null; EditLogFile editsLog=FSImageTestUtil.findLatestEditsLog(sd); assertEquals(editsLog.getFirstTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId()); assertTrue("Should not have finalized " + editsLog,editsLog.isInProgress()); assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down"))); backup=startBackupNode(conf,StartupOption.BACKUP,1); testBNInSync(cluster,backup,4); assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down",false)); backup.stop(false); assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2"))); } finally { LOG.info("Shutting down..."); if (backup != null) backup.stop(); if (fileSys != null) fileSys.close(); if (cluster != null) cluster.shutdown(); } assertStorageDirsMatch(cluster.getNameNode(),backup); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Verify that a file can be read both from NameNode and BackupNode. */ @Test public void testCanReadData() throws IOException { Path file1=new Path("/fileToRead.dat"); Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; FileSystem fileSys=null; BackupNode backup=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); fileSys=cluster.getFileSystem(); long txid=cluster.getNameNodeRpc().getTransactionID(); backup=startBackupNode(conf,StartupOption.BACKUP,1); waitCheckpointDone(cluster,txid); String rpcAddrKeyPreffix=DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster"; String nnAddr=cluster.getNameNode().getNameNodeAddressHostPortString(); conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); String bnAddr=backup.getNameNodeAddressHostPortString(); conf.set(DFSConfigKeys.DFS_NAMESERVICES,"bnCluster"); conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID,"bnCluster"); conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster","nnActive, nnBackup"); conf.set(rpcAddrKeyPreffix + ".nnActive",nnAddr); conf.set(rpcAddrKeyPreffix + ".nnBackup",bnAddr); cluster.startDataNodes(conf,3,true,StartupOption.REGULAR,null); DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)3,seed); FileSystem bnFS=FileSystem.get(new Path("hdfs://" + bnAddr).toUri(),conf); String nnData=DFSTestUtil.readFile(fileSys,file1); String bnData=DFSTestUtil.readFile(bnFS,file1); assertEquals("Data read from BackupNode and NameNode is not the same.",nnData,bnData); } catch ( IOException e) { LOG.error("Error in TestBackupNode: ",e); assertTrue(e.getLocalizedMessage(),false); } finally { if (fileSys != null) fileSys.close(); if (backup != null) backup.stop(); if (cluster != null) cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestBlockUnderConstruction

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test NameNode.getBlockLocations(..) on reading un-closed files. */ @Test public void testGetBlockLocations() throws IOException { final NamenodeProtocols namenode=cluster.getNameNodeRpc(); final Path p=new Path(BASE_DIR,"file2.dat"); final String src=p.toString(); final FSDataOutputStream out=TestFileCreation.createFile(hdfs,p,3); int len=BLOCK_SIZE >>> 1; writeFile(p,out,len); for (int i=1; i < NUM_BLOCKS; ) { final LocatedBlocks lb=namenode.getBlockLocations(src,0,len); final List blocks=lb.getLocatedBlocks(); assertEquals(i,blocks.size()); final Block b=blocks.get(blocks.size() - 1).getBlock().getLocalBlock(); assertTrue(b instanceof BlockInfoUnderConstruction); if (++i < NUM_BLOCKS) { writeFile(p,out,BLOCK_SIZE); len+=BLOCK_SIZE; } } out.close(); }

Class: org.apache.hadoop.hdfs.server.namenode.TestCacheDirectives

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testCreateAndModifyPools() throws Exception { String poolName="pool1"; String ownerName="abc"; String groupName="123"; FsPermission mode=new FsPermission((short)0755); long limit=150; dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit)); RemoteIterator iter=dfs.listCachePools(); CachePoolInfo info=iter.next().getInfo(); assertEquals(poolName,info.getPoolName()); assertEquals(ownerName,info.getOwnerName()); assertEquals(groupName,info.getGroupName()); ownerName="def"; groupName="456"; mode=new FsPermission((short)0700); limit=151; dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit)); iter=dfs.listCachePools(); info=iter.next().getInfo(); assertEquals(poolName,info.getPoolName()); assertEquals(ownerName,info.getOwnerName()); assertEquals(groupName,info.getGroupName()); assertEquals(mode,info.getMode()); assertEquals(limit,(long)info.getLimit()); dfs.removeCachePool(poolName); iter=dfs.listCachePools(); assertFalse("expected no cache pools after deleting pool",iter.hasNext()); proto.listCachePools(null); try { proto.removeCachePool("pool99"); fail("expected to get an exception when " + "removing a non-existent pool."); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe); } try { proto.removeCachePool(poolName); fail("expected to get an exception when " + "removing a non-existent pool."); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe); } iter=dfs.listCachePools(); assertFalse("expected no cache pools after deleting pool",iter.hasNext()); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise a cache pool's maximum relative expiry: bounds checking on pool
 * creation/modification, clamping of directive expirations to the pool's
 * max, moving directives between pools with different maxima, and the
 * RELATIVE_EXPIRY_NEVER cap.
 */
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception {
  // Negative and too-large max expirations are rejected at creation time.
  try {
    dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  try {
    dfs.addCachePool(new CachePoolInfo("failpool")
        .setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("too big", e);
  }
  // A pool's max expiry is reflected in the listing.
  CachePoolInfo coolPool = new CachePoolInfo("coolPool");
  final long poolExpiration = 1000 * 60 * 10l;  // ten minutes in ms
  dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
  RemoteIterator poolIt = dfs.listCachePools();
  CachePoolInfo listPool = poolIt.next().getInfo();
  assertFalse("Should only be one pool", poolIt.hasNext());
  assertEquals("Expected max relative expiry to match set value",
      poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
  // Bounds are checked on re-add/modify as well.
  try {
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("negative", e);
  }
  try {
    dfs.modifyCachePool(coolPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("too big", e);
  }
  // A directive with no explicit expiration inherits approximately the
  // pool's max relative expiration.
  CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
  dfs.addCacheDirective(defaultExpiry);
  RemoteIterator dirIt = dfs.listCacheDirectives(defaultExpiry);
  CacheDirectiveInfo listInfo = dirIt.next().getInfo();
  assertFalse("Should only have one entry in listing", dirIt.hasNext());
  long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Directive expiry should be approximately the pool's max expiry",
      Math.abs(listExpiration - poolExpiration) < 10 * 1000);
  // The max is enforced on add, for both relative and absolute expirations.
  CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.addCacheDirective(builder.setExpiration(
        Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000)))
        .build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // ... and on modify.
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(
            new Date().getTime() + poolExpiration + (10 * 1000))).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Gigantic expiration values are rejected on add...
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
    fail("Added a directive with a gigantic max value");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("is too far in the future", e);
  }
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
    fail("Added a directive with a gigantic max value");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // ... and on modify.
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // Moving a directive to a pool with a lower max expiration fails ...
  CachePoolInfo destPool = new CachePoolInfo("destPool");
  dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
    fail("Modified a directive to a pool with a lower max expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // ... but succeeds when an explicit, small-enough relative expiry is set.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
      .setId(listInfo.getId()).setPool(destPool.getPoolName())
      .setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
  dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
      .setPool(destPool.getPoolName()).build());
  listInfo = dirIt.next().getInfo();
  listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Unexpected relative expiry " + listExpiration
      + " expected approximately " + poolExpiration / 2,
      Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
  // Raise the pool's max to RELATIVE_EXPIRY_NEVER and verify via listing
  // (scan forward since other pools may precede it).
  dfs.modifyCachePool(destPool
      .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
  poolIt = dfs.listCachePools();
  listPool = poolIt.next().getInfo();
  while (!listPool.getPoolName().equals(destPool.getPoolName())) {
    listPool = poolIt.next().getInfo();
  }
  assertEquals("Expected max relative expiry to match set value",
      CachePoolInfo.RELATIVE_EXPIRY_NEVER,
      listPool.getMaxRelativeExpiryMs().longValue());
  // Directives can now use up to RELATIVE_EXPIRY_NEVER.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise cache-pool limits: negative limits are rejected; directives that
 * would exceed a pool's remaining capacity are rejected unless
 * CacheFlag.FORCE is given; shrinking a pool's limit drives it over-limit.
 */
@Test(timeout = 120000)
public void testLimit() throws Exception {
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
    fail("Should not be able to set a negative limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  final String destiny = "poolofdestiny";
  final Path path1 = new Path("/destiny");
  DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
  // Set the pool limit one byte short of what the file needs.
  final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
      .setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
      .setPool(destiny).setPath(path1).build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Raise the limit just enough and try again.
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1 = dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo,
      "testLimit:1");
  // A second file no longer fits in the remaining capacity.
  final Path path2 = new Path("/failure");
  DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
  try {
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(destiny)
        .setPath(path2).build(), EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Shrinking the limit drives the pool over-limit; cached bytes drop to 0.
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
  RemoteIterator it = dfs.listCachePools();
  assertTrue("Expected a cache pool", it.hasNext());
  CachePoolStats stats = it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",
      BLOCK_SIZE, stats.getBytesOverlimit());
  // Moving a directive to a pool without capacity must fail without FORCE.
  CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy")
      .setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1)
        .setPool(inadequate.getPoolName()).build(),
        EnumSet.noneOf(CacheFlag.class));
    // FIX: this expected-exception block previously had no fail() and would
    // pass silently if no exception was thrown.
    fail("Should not be able to move a directive to a pool " +
        "with insufficient remaining capacity");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // With FORCE, both the move and a fresh over-limit directive succeed.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1)
      .setPool(inadequate.getPoolName()).build(), EnumSet.of(CacheFlag.FORCE));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPool(inadequate.getPoolName()).setPath(path1).build(),
      EnumSet.of(CacheFlag.FORCE));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception { SecondaryNameNode secondary=null; try { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0"); secondary=new SecondaryNameNode(conf); final String pool="poolparty"; String groupName="partygroup"; FsPermission mode=new FsPermission((short)0777); long limit=747; dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit)); RemoteIterator pit=dfs.listCachePools(); assertTrue("No cache pools found",pit.hasNext()); CachePoolInfo info=pit.next().getInfo(); assertEquals(pool,info.getPoolName()); assertEquals(groupName,info.getGroupName()); assertEquals(mode,info.getMode()); assertEquals(limit,(long)info.getLimit()); assertFalse("Unexpected # of cache pools found",pit.hasNext()); int numEntries=10; String entryPrefix="/party-"; long prevId=-1; final Date expiry=new Date(); for (int i=0; i < numEntries; i++) { prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build()); } RemoteIterator dit=dfs.listCacheDirectives(null); for (int i=0; i < numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i,dit.hasNext()); CacheDirectiveInfo cd=dit.next().getInfo(); assertEquals(i + 1,cd.getId().longValue()); assertEquals(entryPrefix + i,cd.getPath().toUri().getPath()); assertEquals(pool,cd.getPool()); } assertFalse("Unexpected # of cache directives found",dit.hasNext()); secondary.doCheckpoint(); final String imagePool="imagePool"; dfs.addCachePool(new CachePoolInfo(imagePool)); prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build()); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); dfs.saveNamespace(); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); boolean fetchImage=secondary.doCheckpoint(); assertTrue("Secondary should have fetched a new fsimage 
from NameNode",fetchImage); dfs.removeCachePool(imagePool); cluster.restartNameNode(); pit=dfs.listCachePools(); assertTrue("No cache pools found",pit.hasNext()); info=pit.next().getInfo(); assertEquals(pool,info.getPoolName()); assertEquals(pool,info.getPoolName()); assertEquals(groupName,info.getGroupName()); assertEquals(mode,info.getMode()); assertEquals(limit,(long)info.getLimit()); assertFalse("Unexpected # of cache pools found",pit.hasNext()); dit=dfs.listCacheDirectives(null); for (int i=0; i < numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i,dit.hasNext()); CacheDirectiveInfo cd=dit.next().getInfo(); assertEquals(i + 1,cd.getId().longValue()); assertEquals(entryPrefix + i,cd.getPath().toUri().getPath()); assertEquals(pool,cd.getPool()); assertEquals(expiry.getTime(),cd.getExpiration().getMillis()); } assertFalse("Unexpected # of cache directives found",dit.hasNext()); long nextId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build()); assertEquals(prevId + 1,nextId); } finally { if (secondary != null) { secondary.shutdown(); } } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise add/list/modify/remove of cache directives, including invalid
 * inputs (unknown pool, no-permission pool, malformed path, empty pool
 * name), filtered listings, and removal error cases.
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  // pool4 has mode 0: no permissions for anyone.
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding the same CacheDirectiveInfo must yield a distinct ID.
  long alphaId = addAsUnprivileged(alpha);
  long alphaId2 = addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an "
      + "existing CacheDirectiveInfo", alphaId == alphaId2);
  long betaId = addAsUnprivileged(beta);
  // Invalid adds: unknown pool, no-permission pool, malformed path, empty
  // pool name.
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  } catch (InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with "
        + "mode 0 (no permissions for anyone).");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path "
        + "to the cache directives.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/emptypoolname")).setReplication((short)1)
        .setPool("").build());
    fail("expected an error when adding a cache "
        + "directive with an empty pool name.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
  }
  long deltaId = addAsUnprivileged(delta);
  // Relative paths are accepted.
  long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder()
      .setPath(new Path("relative")).setPool("pool1").build());
  // Listings: unfiltered, by pool, and by ID.
  RemoteIterator iter;
  iter = dfs.listCacheDirectives(null);
  validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter, betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter, alphaId2);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter, relativeId);
  // Removal: once removed, the directive disappears from listings and a
  // second removal is an error.
  dfs.removeCacheDirective(betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  try {
    proto.removeCacheDirective(-42l);
    fail("expected an error when removing a negative ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
  }
  try {
    proto.removeCacheDirective(43l);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the remaining directive's replication and verify.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(relativeId).setReplication((short)555).build());
  iter = dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified = iter.next().getInfo();
  assertEquals(relativeId, modified.getId().longValue());
  assertEquals((short)555, modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter = dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // "." as a path is accepted for add/modify/remove.
  CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
      .setPath(new Path(".")).setPool("pool1").build();
  long id = dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive)
      .setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies what an unprivileged remote user ("myuser") sees when listing
 * cache pools. A pool "poolparty" is created with mode 0700; listing it as
 * myuser initially returns only the pool name — owner, group, mode and limit
 * all come back null (presumably because the restrictive mode hides metadata
 * from non-owners; confirm against CacheManager). After the pool's owner is
 * changed to myuser and a limit of 99 is set, a second listing exposes the
 * full metadata: owner, a non-null group, mode 0700, and the limit.
 */
// NOTE(review): relies on the fixture fields dfs/conf and on
// DFSTestUtil.getFileSystemAs to obtain a filesystem impersonating myuser.
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception { final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser"); final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf); final String poolName="poolparty"; dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700))); RemoteIterator it=myDfs.listCachePools(); CachePoolInfo info=it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name",poolName,info.getPoolName()); assertNull("Unexpected owner name",info.getOwnerName()); assertNull("Unexpected group name",info.getGroupName()); assertNull("Unexpected mode",info.getMode()); assertNull("Unexpected limit",info.getLimit()); final long limit=99; dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit)); it=myDfs.listCachePools(); info=it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name",poolName,info.getPoolName()); assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName()); assertNotNull("Expected group name",info.getGroupName()); assertEquals("Mismatched mode",(short)0700,info.getMode().toShort()); assertEquals("Mismatched limit",limit,(long)info.getLimit()); }

Class: org.apache.hadoop.hdfs.server.namenode.TestCheckPointForSecurityTokens

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that delegation tokens issued by the NameNode survive a
 * -saveNamespace (performed in safe mode) and repeated cluster restarts,
 * and can still be renewed and cancelled afterwards. Tokens 1-2 are created
 * before the save, 3-4 after the first restart (so they live only in the
 * edit log), and token 5 after the second restart.
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token token1 = namesystem.getDelegationToken(new Text(renewer));
    Token token2 = namesystem.getDelegationToken(new Text(renewer));

    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[]{"-saveNamespace"};
    NameNode nn = cluster.getNameNode();

    // Before the save: each in-progress edit log should hold exactly the
    // 5 transactions written so far (START plus the token-related txns).
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should have 5 transactions",
          5, numTransactions);
    }

    // saveNamespace requires safe mode.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      // Preserve the original failure as the cause rather than flattening
      // it to just its message.
      throw new IOException(e);
    }

    // After the save, the freshly rolled logs should contain only START.
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn",
          1, numTransactions);
    }

    // Restart; tokens 1-2 must have been persisted in the saved image.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }

    namesystem = cluster.getNamesystem();
    Token token3 = namesystem.getDelegationToken(new Text(renewer));
    Token token4 = namesystem.getDelegationToken(new Text(renewer));

    // Restart again; tokens 3-4 exist only in the edit log at this point.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    Token token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }

    // One final restart, then renew and cancel every token.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestCheckpoint

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises SecondaryNameNode command-line parsing: no arguments,
 * -checkpoint with and without "force", -geteditsize, -format, and the
 * rejection of conflicting or malformed argument combinations.
 */
@Test
public void testCommandLineParsing() throws ParseException {
  SecondaryNameNode.CommandLineOpts cliOpts =
      new SecondaryNameNode.CommandLineOpts();

  // No arguments: no command is selected.
  cliOpts.parse();
  assertNull(cliOpts.getCommand());

  // Plain -checkpoint selects CHECKPOINT without forcing.
  cliOpts.parse("-checkpoint");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,
      cliOpts.getCommand());
  assertFalse(cliOpts.shouldForceCheckpoint());

  // "-checkpoint force" turns the force flag on.
  cliOpts.parse("-checkpoint", "force");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,
      cliOpts.getCommand());
  assertTrue(cliOpts.shouldForceCheckpoint());

  cliOpts.parse("-geteditsize");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE,
      cliOpts.getCommand());

  cliOpts.parse("-format");
  assertTrue(cliOpts.shouldFormat());

  // Two actions at once must be rejected.
  try {
    cliOpts.parse("-geteditsize", "-checkpoint");
    fail("Should have failed bad parsing for two actions");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }

  // An unknown argument to -checkpoint must be rejected.
  try {
    cliOpts.parse("-checkpoint", "xx");
    fail("Should have failed for bad checkpoint arg");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Injects an IOException into the 2NN's merge (via the class-level Mockito
 * faultInjector), verifies the checkpoint fails, then — after more writes
 * and an hsync on the NN side — verifies that the next checkpoint recovers
 * by reloading the image (doCheckpoint() returning true signals a reload).
 * The empty catch around the failing doCheckpoint() is intentional: that
 * IOException is the injected fault.
 */
@Test(timeout=30000) public void testReloadOnEditReplayFailure() throws IOException { Configuration conf=new HdfsConfiguration(); FSDataOutputStream fos=null; SecondaryNameNode secondary=null; MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); fs=cluster.getFileSystem(); secondary=startSecondaryNameNode(conf); fos=fs.create(new Path("tmpfile0")); fos.write(new byte[]{0,1,2,3}); secondary.doCheckpoint(); fos.write(new byte[]{0,1,2,3}); fos.hsync(); Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge(); try { secondary.doCheckpoint(); fail("Fault injection failed."); } catch ( IOException ioe) { } Mockito.reset(faultInjector); fos.write(new byte[]{0,1,2,3}); fos.hsync(); assertTrue("Another checkpoint should have reloaded image",secondary.doCheckpoint()); } finally { if (fs != null) { fs.close(); } cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; Mockito.reset(faultInjector); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests checkpoint in HDFS, in three phases:
 * 1. Format a cluster, create a file, checkpoint via a 2NN, and verify the
 *    NN's image-transfer metrics were bumped.
 * 2. Restart (format=false), verify file1 survived, replace it with file2,
 *    checkpoint, verify the 2NN's root inode is the same object registered
 *    in its inode map, create /tmp_tmp, and checkpoint again.
 * 3. Restart once more and verify the namespace matches the last checkpoint.
 */
@Test
public void testCheckpoint() throws IOException {
  Path file1 = new Path("checkpoint.dat");
  Path file2 = new Path("checkpoint2.dat");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);

  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    assertTrue(!fileSys.exists(file2));

    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fileSys, file1, replication);

    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();

    // The checkpoint should have exercised all image/edit transfer ops.
    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
    assertCounterGt("GetImageNumOps", 0, rb);
    assertCounterGt("GetEditNumOps", 0, rb);
    assertCounterGt("PutImageNumOps", 0, rb);
    assertGaugeGt("GetImageAvgTime", 0.0, rb);
    assertGaugeGt("GetEditAvgTime", 0.0, rb);
    assertGaugeGt("PutImageAvgTime", 0.0, rb);
  } finally {
    // Null-guard so a startup failure isn't masked by an NPE from close().
    if (fileSys != null) fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }

  Path tmpDir = new Path("/tmp_tmp");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();

    // file1 must have survived the restart; replace it with file2.
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fileSys, file2, replication);

    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();

    // The 2NN's root dir must be the exact inode stored in its inode map.
    FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir;
    INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
    assertSame(rootInMap, secondaryFsDir.rootDir);

    fileSys.delete(tmpDir, true);
    fileSys.mkdirs(tmpDir);
    secondary.doCheckpoint();
  } finally {
    if (fileSys != null) fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }

  // Final restart: namespace must match what the last checkpoint recorded.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .format(false).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  try {
    // Inside the try so a failed assertion still shuts the cluster down.
    assertTrue(!fileSys.exists(file1));
    assertTrue(fileSys.exists(tmpDir));
    checkFile(fileSys, file2, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
    cluster = null;
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
// Phases: (1) checkpoint a small namespace (/testfile) with a 2NN; (2) assert
// that starting the NN with -importCheckpoint while it still holds an image
// fails with "NameNode already contains an image"; (3) wipe the NN storage
// dirs and assert the import restores /testfile and re-saves a checkpoint
// (expected at txid 3 per assertNNHasCheckpoints).
/** * Test the importCheckpoint startup option. Verifies: * 1. if the NN already contains an image, it will not be allowed * to import a checkpoint. * 2. if the NN does not contain an image, importing a checkpoint * succeeds and re-saves the image */ @Test public void testImportCheckpoint() throws Exception { Configuration conf=new HdfsConfiguration(); Path testPath=new Path("/testfile"); SecondaryNameNode snn=null; MiniDFSCluster cluster=null; Collection nameDirs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); nameDirs=cluster.getNameDirs(0); cluster.getFileSystem().mkdirs(testPath); snn=startSecondaryNameNode(conf); snn.doCheckpoint(); } finally { cleanup(snn); cleanup(cluster); cluster=null; } LOG.info("Trying to import checkpoint when the NameNode already " + "contains an image. This should fail."); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).startupOption(StartupOption.IMPORT).build(); fail("NameNode did not fail to start when it already contained " + "an image"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("NameNode already contains an image",ioe); } finally { cleanup(cluster); cluster=null; } LOG.info("Removing NN storage contents"); for ( URI uri : nameDirs) { File dir=new File(uri.getPath()); LOG.info("Cleaning " + dir); removeAndRecreateDir(dir); } LOG.info("Trying to import checkpoint"); try { cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0).startupOption(StartupOption.IMPORT).build(); assertTrue("Path from checkpoint should exist after import",cluster.getFileSystem().exists(testPath)); FSImageTestUtil.assertNNHasCheckpoints(cluster,Ints.asList(3)); } finally { cleanup(cluster); cluster=null; } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Flow: with separate name-only and edits-only dirs and storage-restore on,
// chmod 000 the image dir so the 2NN checkpoint fails with "No targets in
// destination storage"; chmod it back to 755, call restoreFailedStorage and
// rollEditLog on the NN, and verify the next checkpoint succeeds (checkpoint
// expected at txid 8) with the parallel-files invariant intact. The finally
// block re-chmods 755 unconditionally so later cleanup can delete the dir.
/** * Test case where the NN is configured with a name-only and an edits-only * dir, with storage-restore turned on. In this case, if the name-only dir * disappears and comes back, a new checkpoint after it has been restored * should function correctly. * @throws Exception */ @Test public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { MiniDFSCluster cluster=null; SecondaryNameNode secondary=null; File currentDir=null; Configuration conf=new HdfsConfiguration(); File base_dir=new File(MiniDFSCluster.getBaseDirectory()); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/name-only"); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/edits-only"); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(base_dir,"namesecondary1")).toString()); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).manageNameDfsDirs(false).build(); secondary=startSecondaryNameNode(conf); secondary.doCheckpoint(); NamenodeProtocols nn=cluster.getNameNodeRpc(); NNStorage storage=cluster.getNameNode().getFSImage().getStorage(); StorageDirectory sd0=storage.getStorageDir(0); assertEquals(NameNodeDirType.IMAGE,sd0.getStorageDirType()); currentDir=sd0.getCurrentDir(); assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"000")); try { secondary.doCheckpoint(); fail("Did not fail to checkpoint when there are no valid storage dirs"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("No targets in destination storage",ioe); } assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"755")); nn.restoreFailedStorage("true"); nn.rollEditLog(); secondary.doCheckpoint(); assertNNHasCheckpoints(cluster,ImmutableList.of(8)); assertParallelFilesInvariant(cluster,ImmutableList.of(secondary)); } finally { if (currentDir != null) { 
FileUtil.chmod(currentDir.getAbsolutePath(),"755"); } cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow: the first doCheckpoint() must download the original image (at the
// NN's most-recent-checkpoint txid N) and write a new one at N+2 with the
// same length (the namespace is unchanged). After a single mkdir, a second
// doCheckpoint() must NOT re-download the image (doCheckpoint() returns
// false) but the image it uploads (expected at txid N+5) must be larger
// than the original, since the namespace now contains the new directory.
/** * Test that the secondary doesn't have to re-download image * if it hasn't changed. */ @Test public void testSecondaryImageDownload() throws IOException { LOG.info("Starting testSecondaryImageDownload"); Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0"); Path dir=new Path("/checkpoint"); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build(); cluster.waitActive(); FileSystem fileSys=cluster.getFileSystem(); FSImage image=cluster.getNameNode().getFSImage(); SecondaryNameNode secondary=null; try { assertTrue(!fileSys.exists(dir)); secondary=startSecondaryNameNode(conf); File secondaryDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary1"); File secondaryCurrent=new File(secondaryDir,"current"); long expectedTxIdToDownload=cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId(); File secondaryFsImageBefore=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload)); File secondaryFsImageAfter=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload + 2)); assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists",secondaryFsImageBefore.exists()); assertTrue("Secondary should have loaded an image",secondary.doCheckpoint()); assertTrue("Secondary should have downloaded original image",secondaryFsImageBefore.exists()); assertTrue("Secondary should have created a new image",secondaryFsImageAfter.exists()); long fsimageLength=secondaryFsImageBefore.length(); assertEquals("Image size should not have changed",fsimageLength,secondaryFsImageAfter.length()); fileSys.mkdirs(dir); assertFalse("Another checkpoint should not have to re-load image",secondary.doCheckpoint()); for ( StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) { File imageFile=NNStorage.getImageFile(sd,NameNodeFile.IMAGE,expectedTxIdToDownload + 5); 
assertTrue("Image size increased",imageFile.length() > fsimageLength); } } finally { fileSys.close(); cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, when the edits dir is configured separately from the name dir,
 * the running NameNode holds a lock on it: taking a second lock on the edits
 * storage directory must fail while the NN is up, and a second cluster
 * started against the same (still-locked) directory must refuse to start.
 */
@Test
public void testSeparateEditsDirLocking() throws IOException {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  File editsDir =
      new File(MiniDFSCluster.getBaseDirectory(), "testSeparateEditsDirLocking");

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDir.getAbsolutePath());

  MiniDFSCluster dfsCluster = null;
  StorageDirectory lockedSd = null;
  try {
    dfsCluster = new MiniDFSCluster.Builder(conf)
        .manageNameDfsDirs(false)
        .numDataNodes(0)
        .build();
    NNStorage nnStorage = dfsCluster.getNameNode().getFSImage().getStorage();
    // Every EDITS storage dir must be rooted at our configured edits dir
    // and must already be locked by the running NN.
    for (StorageDirectory dir : nnStorage.dirIterable(NameNodeDirType.EDITS)) {
      assertEquals(editsDir.getAbsoluteFile(), dir.getRoot());
      assertLockFails(dir);
      lockedSd = dir;
    }
  } finally {
    cleanup(dfsCluster);
    dfsCluster = null;
  }

  // The loop must have seen at least one edits dir; with the cluster gone,
  // a fresh cluster still can't start while the dir is locked externally.
  assertNotNull(lockedSd);
  assertClusterStartFailsWhenDirLocked(conf, lockedSd);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow: a federated cluster with nameservices ns1/ns2 is started; each 2NN
// config clears the global service-RPC key and sets only the suffixed key
// for its own nameservice, so each 2NN should bind to a distinct NN. The
// asserts check each 2NN talks to its own NN's RPC port, that the two ports
// differ, and that both can complete a checkpoint.
/** * Starts two namenodes and two secondary namenodes, verifies that secondary * namenodes are configured correctly to talk to their respective namenodes * and can do the checkpoint. * @throws IOException */ @Test public void testMultipleSecondaryNamenodes() throws IOException { Configuration conf=new HdfsConfiguration(); String nameserviceId1="ns1"; String nameserviceId2="ns2"; conf.set(DFSConfigKeys.DFS_NAMESERVICES,nameserviceId1 + "," + nameserviceId2); MiniDFSCluster cluster=null; SecondaryNameNode secondary1=null; SecondaryNameNode secondary2=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).build(); Configuration snConf1=new HdfsConfiguration(cluster.getConfiguration(0)); Configuration snConf2=new HdfsConfiguration(cluster.getConfiguration(1)); InetSocketAddress nn1RpcAddress=cluster.getNameNode(0).getNameNodeAddress(); InetSocketAddress nn2RpcAddress=cluster.getNameNode(1).getNameNodeAddress(); String nn1=nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort(); String nn2=nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort(); snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,""); snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,""); snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId1),nn1); snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId2),nn2); secondary1=startSecondaryNameNode(snConf1); secondary2=startSecondaryNameNode(snConf2); assertEquals(secondary1.getNameNodeAddress().getPort(),nn1RpcAddress.getPort()); assertEquals(secondary2.getNameNodeAddress().getPort(),nn2RpcAddress.getPort()); assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort()); secondary1.doCheckpoint(); secondary2.doCheckpoint(); } finally { cleanup(secondary1); secondary1=null; cleanup(secondary2); 
secondary2=null; cleanup(cluster); cluster=null; } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With the checkpoint retry limit set to 1 (and a 1s check period) and an
 * IOException injected into every merge, the 2NN's doWork() loop is expected
 * to give up and call terminate — surfacing here as an ExitException because
 * the test framework arms ExitUtil. The assertion checks the accumulated
 * merge error count is one more than the retry limit (the initial failure
 * plus the single allowed retry). checkExitOnShutdown(false) keeps the
 * mini-cluster from treating the 2NN's exit as a test failure.
 */
@Test(timeout=30000) public void testTooManyEditReplayFailures() throws IOException { Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,"1"); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,"1"); FSDataOutputStream fos=null; SecondaryNameNode secondary=null; MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build(); cluster.waitActive(); fs=cluster.getFileSystem(); fos=fs.create(new Path("tmpfile0")); fos.write(new byte[]{0,1,2,3}); Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge(); secondary=startSecondaryNameNode(conf); secondary.doWork(); fail("2NN did not exit."); } catch ( ExitException ee) { ExitUtil.resetFirstExitException(); assertEquals("Max retries",1,secondary.getMergeErrorCount() - 1); } finally { if (fs != null) { fs.close(); } cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; Mockito.reset(faultInjector); } }

UtilityVerifier BooleanVerifier HybridVerifier 
// The fault is injected after the 2NN rolls the NN's edit logs, so both
// failing attempts leave a completed merge behind; the inner finally resets
// the injector before the third doCheckpoint(), which must then succeed.
/** * Tests the following sequence of events: * - secondary successfully makes a checkpoint * - it then fails while trying to upload it * - it then fails again for the same reason * - it then tries to checkpoint a third time */ @Test public void testCheckpointAfterTwoFailedUploads() throws IOException { MiniDFSCluster cluster=null; SecondaryNameNode secondary=null; Configuration conf=new HdfsConfiguration(); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build(); secondary=startSecondaryNameNode(conf); Mockito.doThrow(new IOException("Injecting failure after rolling edit logs")).when(faultInjector).afterSecondaryCallsRollEditLog(); try { secondary.doCheckpoint(); fail("Should have failed upload"); } catch ( IOException ioe) { LOG.info("Got expected failure",ioe); assertTrue(ioe.toString().contains("Injecting failure")); } try { secondary.doCheckpoint(); fail("Should have failed upload"); } catch ( IOException ioe) { LOG.info("Got expected failure",ioe); assertTrue(ioe.toString().contains("Injecting failure")); } finally { Mockito.reset(faultInjector); } secondary.doCheckpoint(); } finally { cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; } }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace: a checkpoint against the reformatted NN (same
 * ports, new namespace ID) must fail with "Inconsistent checkpoint".
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;

  Configuration conf = new HdfsConfiguration();
  // Short idle time so the 2NN's cached IPC connection to the old NN dies
  // quickly once that NN is shut down.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();

    // Give the 2NN a dedicated checkpoint dir and a first good checkpoint.
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
        "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
        checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);
    secondary.doCheckpoint();

    // Reformat the NN on the same ports while the 2NN stays up.
    cluster.shutdown();
    cluster = null;
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
      // Restore the interrupt status rather than swallowing it.
      Thread.currentThread().interrupt();
    }
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .nameNodePort(origPort).nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test that a fault while downloading edits does not prevent future
 * checkpointing: inject a failure before the downloaded edits are renamed,
 * verify exactly one .tmp edits file is left behind per EDITS dir, truncate
 * it, and confirm the next checkpoint still succeeds.
 */
@Test(timeout=30000)
public void testEditFailureBeforeRename() throws IOException {
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    secondary = startSecondaryNameNode(conf);
    DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short)1, 0l);
    secondary.doCheckpoint();

    // Cause the 2NN to fail after downloading edits but before renaming
    // them into place.
    Mockito.doThrow(new IOException("Injecting failure before edit rename"))
        .when(faultInjector).beforeEditsRename();
    DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short)1, 0l);
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "Injecting failure before edit rename", ioe);
    }
    Mockito.reset(faultInjector);

    // The aborted rename must have left exactly one .tmp edits file in
    // each of the 2NN's edits dirs; truncate it to simulate corruption.
    for (StorageDirectory sd : secondary.getFSImage().getStorage()
        .dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
      assertTrue("Expected a single tmp edits file in directory " + sd.toString(),
          tmpEdits.length == 1);
      RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
      try {
        randFile.setLength(0);
      } finally {
        // Close even if the truncate throws, so the handle isn't leaked.
        randFile.close();
      }
    }

    // A subsequent checkpoint must cope with the truncated leftover file.
    secondary.doCheckpoint();
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that the primary NN will not serve any files to a 2NN who doesn't
 * share its namespace ID, and also will not accept any files from one.
 * A mocked NNStorage reporting a bogus StorageInfo stands in for the
 * mismatched 2NN; image download, edits download and image upload must all
 * fail with a "but the secondary expected" storage-info mismatch.
 */
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
  MiniDFSCluster cluster = null;

  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    NamenodeProtocols nn = cluster.getNameNodeRpc();
    URL fsName = DFSUtil.getInfoServer(
        cluster.getNameNode().getServiceRpcAddress(), conf,
        DFSUtil.getHttpClientScheme(conf)).toURL();

    // Roll so there is a finalized edits segment to request.
    nn.rollEditLog();
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
    RemoteEditLog log = manifest.getLogs().get(0);

    // Mock storage whose StorageInfo (nsid/cluster "X") cannot match the NN.
    NNStorage dstImage = Mockito.mock(NNStorage.class);
    Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(Mockito.anyObject(), Mockito.anyString());
    File mockImageFile = File.createTempFile("image", "");
    // Don't leave the mock image lying around after the JVM exits.
    mockImageFile.deleteOnExit();
    FileOutputStream imageFile = new FileOutputStream(mockImageFile);
    try {
      imageFile.write("data".getBytes());
    } finally {
      // Close even if the write fails, so the descriptor isn't leaked.
      imageFile.close();
    }
    Mockito.doReturn(mockImageFile).when(dstImage)
        .findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
    Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE)
        .toColonSeparatedString()).when(dstImage).toColonSeparatedString();

    try {
      TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }

    try {
      TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }

    try {
      TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage,
          NameNodeFile.IMAGE, 0);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
// The expected message embeds the locking JVM's RuntimeMXBean name, except
// on Windows (Path.WINDOWS) where the name portion is empty. LogCapturer
// hooks the Storage class logger to capture the error output for the check.
/** * Test that, an attempt to lock a storage that is already locked by a nodename, * logs error message that includes JVM name of the namenode that locked it. */ @Test public void testStorageAlreadyLockedErrorMessage() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; StorageDirectory savedSd=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); NNStorage storage=cluster.getNameNode().getFSImage().getStorage(); for ( StorageDirectory sd : storage.dirIterable(null)) { assertLockFails(sd); savedSd=sd; } LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class)); try { savedSd.lock(); fail("Namenode should not be able to lock a storage that is already locked"); } catch ( IOException ioe) { String lockingJvmName=Path.WINDOWS ? "" : " " + ManagementFactory.getRuntimeMXBean().getName(); String expectedLogMessage="It appears that another namenode" + lockingJvmName + " has already locked the storage directory"; assertTrue("Log output does not contain expected log message: " + expectedLogMessage,logs.getOutput().contains(expectedLogMessage)); } } finally { cleanup(cluster); cluster=null; } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that checkpoints write a legacy OIV image into the configured
 * directory and that only the configured number of images (2) is retained.
 */
@Test
public void testLegacyOivImage() throws Exception {
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary=null;
  File tmpDir=Files.createTempDir();
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,tmpDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,"2");
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary=startSecondaryNameNode(conf);
    // One checkpoint -> exactly one file in the OIV image dir.
    secondary.doCheckpoint();
    String files1[]=tmpDir.list();
    assertEquals("Only one file is expected",1,files1.length);
    // Two more checkpoints -> only the two most recent images kept.
    secondary.doCheckpoint();
    secondary.doCheckpoint();
    String files2[]=tmpDir.list();
    assertEquals("Two files are expected",2,files2.length);
    // The oldest image (from the first checkpoint) must have been purged.
    for ( String fName : files2) {
      assertFalse(fName.equals(files1[0]));
    }
  } finally {
    cleanup(secondary);
    cleanup(cluster);
    // Fix: File#delete() silently fails on a non-empty directory, which
    // leaked the generated images; remove the contents first.
    File[] leftovers=tmpDir.listFiles();
    if (leftovers != null) {
      for ( File f : leftovers) {
        f.delete();
      }
    }
    tmpDir.delete();
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test that the secondary namenode correctly deletes temporary edits
 * on startup.
 */
@Test(timeout=60000)
public void testDeleteTemporaryEditsOnStartup() throws IOException {
  Configuration conf=new HdfsConfiguration();
  SecondaryNameNode secondary=null;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    secondary=startSecondaryNameNode(conf);
    DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
    secondary.doCheckpoint();
    // Make the next checkpoint fail after edits are downloaded (as .tmp
    // files) but before they are renamed into place.
    Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
    DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    } catch ( IOException ioe) {
      GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
    }
    Mockito.reset(faultInjector);
    // The aborted checkpoint must have left exactly one temporary edits
    // file in each edits directory.
    for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
      assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
    }
    // Restarting the 2NN should purge the leftover temporary edits.
    secondary.shutdown();
    secondary=startSecondaryNameNode(conf);
    for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
      assertTrue("Did not expect a tmp edits file in directory " + sd.toString(),tmpEdits.length == 0);
    }
    // A subsequent checkpoint should now succeed cleanly.
    secondary.doCheckpoint();
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestClusterId

APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format -force -clusterid option when name
 * directory exists. Format should succeed.
 * @throws IOException
 */
@Test
public void testFormatWithForceAndClusterId() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String requestedId="testFormatWithForceAndClusterId";
  final String[] args={"-format","-force","-clusterid",requestedId};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String actualId=getClusterId(config);
  assertEquals("ClusterIds do not match",requestedId,actualId);
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format -force options when name directory
 * exists. Format should succeed.
 * @throws IOException
 */
@Test
public void testFormatWithForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args={"-format","-force"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String newClusterId=getClusterId(config);
  assertTrue("Didn't get new ClusterId",newClusterId != null && !newClusterId.isEmpty());
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -force -nonInteractive -force option. Format
 * should succeed.
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractiveAndForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args={"-format","-nonInteractive","-force"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String newClusterId=getClusterId(config);
  assertTrue("Didn't get new ClusterId",newClusterId != null && !newClusterId.isEmpty());
}

BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format option when a non empty name directory
 * exists. Enter N when prompted and format should be aborted.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFormatWithoutForceEnterNo() throws IOException, InterruptedException {
  // create a non-empty name dir so the format prompts for confirmation
  File data=new File(hdfsDir,"file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Answer "N" to the interactive re-format prompt.
  InputStream origIn=System.in;
  ByteArrayInputStream bins=new ByteArrayInputStream("N\n".getBytes());
  System.setIn(bins);
  String[] argv={"-format"};
  try {
    NameNode.createNameNode(argv,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 1 means the format was aborted
    assertEquals("Format should not have succeeded",1,e.status);
  } finally {
    // Fix: restore stdin even if an assertion above fails, so later
    // tests are not left reading from the exhausted byte stream.
    System.setIn(origIn);
  }
  File version=new File(hdfsDir,"current/VERSION");
  assertFalse("Check version should not exist",version.exists());
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format option when an empty name directory
 * exists. Format should succeed.
 * @throws IOException
 */
@Test
public void testFormatWithEmptyDir() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String[] args={"-format"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String newClusterId=getClusterId(config);
  assertTrue("Didn't get new ClusterId",newClusterId != null && !newClusterId.isEmpty());
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format -nonInteractive options when name
 * directory does not exist. Format should succeed.
 * (The method name's "Exit" is a pre-existing typo for "Exist"; it is
 * kept unchanged so test selection by name keeps working.)
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
  final String[] args={"-format","-nonInteractive"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String newClusterId=getClusterId(config);
  assertTrue("Didn't get new ClusterId",newClusterId != null && !newClusterId.isEmpty());
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format option when a non empty name directory
 * exists. Enter Y when prompted and the format should succeed.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
  // create a non-empty name dir so the format prompts for confirmation
  File data=new File(hdfsDir,"file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Answer "Y" to the interactive re-format prompt.
  InputStream origIn=System.in;
  ByteArrayInputStream bins=new ByteArrayInputStream("Y\n".getBytes());
  System.setIn(bins);
  String[] argv={"-format"};
  try {
    NameNode.createNameNode(argv,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  } finally {
    // Fix: restore stdin even if an assertion above fails, so later
    // tests are not left reading from the exhausted byte stream.
    System.setIn(origIn);
  }
  String cid=getClusterId(config);
  assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format option. Format should succeed.
 * @throws IOException
 */
@Test
public void testFormat() throws IOException {
  final String[] args={"-format"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    // exit status 0 means the format completed successfully
    assertEquals("Format should have succeeded",0,e.status);
  }
  final String newClusterId=getClusterId(config);
  assertTrue("Didn't get new ClusterId",newClusterId != null && !newClusterId.isEmpty());
}

BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with -format -nonInteractive options when a non empty
 * name directory exists. Format should not succeed.
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractive() throws IOException {
  // make the name dir non-empty so the non-interactive format must abort
  final File data=new File(hdfsDir,"file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  final String[] args={"-format","-nonInteractive"};
  try {
    NameNode.createNameNode(args,config);
    fail("createNameNode() did not call System.exit()");
  } catch ( ExitException e) {
    assertEquals("Format should have been aborted with exit code 1",1,e.status);
  }
  final File version=new File(hdfsDir,"current/VERSION");
  assertFalse("Check version should not exist",version.exists());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDeadDatanode

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test to ensure namenode rejects request from dead datanode
 * - Start a cluster
 * - Shutdown the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to Namenode and make sure it is rejected
 *   appropriately.
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf=new HdfsConfiguration();
  // short heartbeat/recheck intervals so the DN is declared dead quickly
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1L);
  cluster=new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  String poolId=cluster.getNamesystem().getBlockPoolId();
  DataNode dn=cluster.getDataNodes().get(0);
  DatanodeRegistration reg=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0),poolId);
  waitForDatanodeState(reg.getDatanodeUuid(),true,20000);
  // Shut the DN down and wait until the NN marks it dead.
  dn.shutdown();
  waitForDatanodeState(reg.getDatanodeUuid(),false,20000);
  DatanodeProtocol dnp=cluster.getNameNodeRpc();
  ReceivedDeletedBlockInfo[] blocks={new ReceivedDeletedBlockInfo(new Block(0),ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,null)};
  StorageReceivedDeletedBlocks[] storageBlocks={new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(),blocks)};
  // blockReceivedAndDeleted from a dead DN must be rejected.
  try {
    dnp.blockReceivedAndDeleted(reg,poolId,storageBlocks);
    fail("Expected IOException is not thrown");
  } catch ( IOException ex) {
    // expected
  }
  // A block report from a dead DN must be rejected as well.
  StorageBlockReport[] report={new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()),new long[]{0L,0L,0L})};
  try {
    dnp.blockReport(reg,poolId,report);
    fail("Expected IOException is not thrown");
  } catch ( IOException ex) {
    // expected
  }
  // A heartbeat from a dead DN should yield exactly one command telling
  // the DN to re-register.
  StorageReport[] rep={new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()),false,0,0,0,0)};
  DatanodeCommand[] cmd=dnp.sendHeartbeat(reg,rep,0L,0L,0,0,0).getCommands();
  assertEquals(1,cmd.length);
  assertEquals(cmd[0].getAction(),RegisterCommand.REGISTER.getAction());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDiskspaceQuotaUpdate

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the quota can be correctly updated for append
 */
@Test(timeout=60000)
public void testUpdateQuotaForAppend() throws Exception {
  final Path foo=new Path(dir,"foo");
  final Path bar=new Path(foo,"bar");
  long currentFileLen=BLOCKSIZE;
  DFSTestUtil.createFile(dfs,bar,currentFileLen,REPLICATION,seed);
  dfs.setQuota(foo,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
  // 1. append half a block
  DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE / 2);
  currentFileLen+=(BLOCKSIZE / 2);
  INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  Quota.Counts quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns=quota.get(Quota.NAMESPACE);
  long ds=quota.get(Quota.DISKSPACE);
  // namespace usage: the directory plus the file
  assertEquals(2,ns);
  assertEquals(currentFileLen * REPLICATION,ds);
  ContentSummary c=dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(),ds);
  // 2. append a whole block
  DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE);
  currentFileLen+=BLOCKSIZE;
  quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns=quota.get(Quota.NAMESPACE);
  ds=quota.get(Quota.DISKSPACE);
  assertEquals(2,ns);
  assertEquals(currentFileLen * REPLICATION,ds);
  c=dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(),ds);
  // 3. append several blocks plus a fraction of a block
  DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen+=(BLOCKSIZE * 3 + BLOCKSIZE / 8);
  quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns=quota.get(Quota.NAMESPACE);
  ds=quota.get(Quota.DISKSPACE);
  assertEquals(2,ns);
  assertEquals(currentFileLen * REPLICATION,ds);
  c=dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(),ds);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the quota can be correctly updated for create file
 */
@Test(timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
  final Path foo=new Path(dir,"foo");
  Path createdFile=new Path(foo,"created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
  // the file spans two full blocks plus half a block
  long fileLen=BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs,createdFile,BLOCKSIZE / 16,fileLen,BLOCKSIZE,REPLICATION,seed);
  INode fnode=fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  Quota.Counts cnt=fnode.asDirectory().getDirectoryWithQuotaFeature().getSpaceConsumed();
  // namespace usage: the directory plus the created file
  assertEquals(2,cnt.get(Quota.NAMESPACE));
  assertEquals(fileLen * REPLICATION,cnt.get(Quota.DISKSPACE));
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLog

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. On of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test
public void testLoadingWithGaps() throws IOException {
  File f1=new File(TEST_DIR + "/gaptest0");
  List editUris=ImmutableList.of(f1.toURI());
  NNStorage storage=setupEdits(editUris,3);
  // delete the finalized segment covering the second roll, creating a
  // gap in the transaction sequence
  final long startGapTxId=1 * TXNS_PER_ROLL + 1;
  final long endGapTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
        return true;
      }
      return false;
    }
  }
  );
  assertEquals(1,files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  // selecting streams across the gap must fail with a descriptive error
  try {
    editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    fail("Should have thrown exception");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Gap in transactions. Expected to be able to read up until " + "at least txid 40 but unable to find any edit logs containing " + "txid 11",ioe);
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test edit log failover. If a single edit log is missing, other
 * edits logs should be used instead.
 */
@Test
public void testEditLogFailOverFromMissing() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  // Delete one finalized segment from the first directory only; the same
  // segment is still present in the second directory.
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override
    public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  // Fix: restore the stripped type argument; with a raw Collection,
  // toArray returns Object[], which IOUtils.cleanup cannot accept.
  Collection<EditLogInputStream> streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  } catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  } finally {
    // Fix: selectInputStreams may throw before 'streams' is assigned; the
    // unconditional cleanup then dereferenced null, masking the real
    // failure with an NPE.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test edit log failover from a corrupt edit log
 */
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override
    public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  // Flip the final int of f1's copy of the segment so reading from f1
  // fails and failover to f2's copy is required.
  long fileLen=files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: "+ fileLen);
  RandomAccessFile rwf=new RandomAccessFile(files[0],"rw");
  rwf.seek(fileLen - 4);
  int b=rwf.readInt();
  rwf.seek(fileLen - 4);
  rwf.writeInt(b + 1);
  rwf.close();
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  // Fix: restore the stripped type argument; with a raw Collection,
  // toArray returns Object[], which IOUtils.cleanup cannot accept.
  Collection<EditLogInputStream> streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  } catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  } finally {
    // Fix: selectInputStreams may throw before 'streams' is assigned; the
    // unconditional cleanup then dereferenced null, masking the real
    // failure with an NPE.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test that a NameNode restart fails with a ChecksumException when the
 * tail of every finalized edits file has been corrupted.
 */
@Test
public void testEditChecksum() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys=cluster.getFileSystem();
  final FSNamesystem namesystem=cluster.getNamesystem();
  FSImage fsimage=namesystem.getFSImage();
  final FSEditLog editLog=fsimage.getEditLog();
  fileSys.mkdirs(new Path("/tmp"));
  // Fix: restore the stripped type arguments; iterating a raw LinkedList
  // with a typed for-each does not compile.
  Iterator<StorageDirectory> iter=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
  LinkedList<StorageDirectory> sds=new LinkedList<StorageDirectory>();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();
  // Flip one int at the end of each finalized edits file.
  for ( StorageDirectory sd : sds) {
    File editFile=NNStorage.getFinalizedEditsFile(sd,1,3);
    assertTrue(editFile.exists());
    long fileLen=editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: "+ fileLen);
    RandomAccessFile rwf=new RandomAccessFile(editFile,"rw");
    rwf.seek(fileLen - 4);
    int b=rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
    rwf.close();
  }
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    fail("should not be able to start");
  } catch ( IOException e) {
    assertNotNull("Cause of exception should be ChecksumException",e.getCause());
    assertEquals("Cause of exception should be ChecksumException",ChecksumException.class,e.getCause().getClass());
  } finally {
    // Fix: if the restart unexpectedly succeeds, shut the cluster down
    // instead of leaking it after fail().
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogFileInputStream

APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier 
/**
 * Test reading an edit log segment over HTTP via a mocked connection that
 * serves FAKE_LOG_DATA, verifying both the op counts and the length header.
 */
@Test
public void testReadURL() throws Exception {
  // Mock an HTTP connection that returns the fake log bytes.
  HttpURLConnection conn=mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");
  URLConnectionFactory factory=mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.any(),anyBoolean());
  URL url=new URL("http://localhost/fakeLog");
  EditLogInputStream elis=EditLogFileInputStream.fromUrl(factory,url,HdfsConstants.INVALID_TXID,HdfsConstants.INVALID_TXID,false);
  // Fix: restore the type arguments stripped from this declaration; the
  // raw "EnumMap>" form does not compile. NOTE(review): value type
  // inferred from the ".held" accesses below — confirm against
  // FSImageTestUtil.countEditLogOpTypes' signature.
  EnumMap<FSEditLogOpCodes,Holder<Integer>> counts=FSImageTestUtil.countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held,is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held,is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held,is(1));
  // the Content-Length header must be reflected in the stream length
  assertEquals(FAKE_LOG_DATA.length,elis.length());
  elis.close();
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogFileOutputStream

TestCleaner TestInitializer HybridVerifier 
/** Remove any leftover edits file before and after each test case. */
@Before
@After
public void deleteEditsFile(){
  if (TEST_EDITS.exists()) {
    TEST_EDITS.delete();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogJournalFailures

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that the NN aborts when every edits directory fails at the write
 * stage, leaving no journal to sync the next edit to.
 */
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  // perform one successful edit first so a synced transaction exists
  assertTrue(doAnEdit());
  // invalidate both edits dirs at the write (and flush) stage
  invalidateEditsDirAtIndex(0,true,true);
  invalidateEditsDirAtIndex(1,true,true);
  // the next edit has nowhere to sync to, so the NN must halt
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, " + " should have halted the NN");
  } catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "No journals available to flush. " + "Unsynced transactions: 1",re);
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test that a single failure of a REQUIRED edits directory halts the NN,
 * even though other (non-required) journals are still healthy.
 */
@Test
public void testSingleRequiredFailedEditsDirOnSetReadyToFlush() throws IOException {
  // Restart the cluster with the first edits dir marked as required and
  // minimums relaxed so only the required-journal check can trip.
  String[] editsDirs=cluster.getConfiguration(0).getTrimmedStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  shutDownMiniCluster();
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,editsDirs[0]);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,0);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
  setUpMiniCluster(conf,true);
  assertTrue(doAnEdit());
  // Invalidate the required edits dir only.
  invalidateEditsDirAtIndex(0,false,false);
  JournalAndStream nonRequiredJas=getJournalAndStream(1);
  EditLogFileOutputStream nonRequiredSpy=spyOnStream(nonRequiredJas);
  assertTrue(nonRequiredJas.isActive());
  // A single failure of a required journal must be fatal.
  try {
    doAnEdit();
    fail("A single failure of a required journal should have halted the NN");
  } catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("setReadyToFlush failed for required journal",re);
  }
  // The non-required journal was never flushed and ends up inactive.
  Mockito.verify(nonRequiredSpy,Mockito.never()).setReadyToFlush();
  assertFalse(nonRequiredJas.isActive());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that the NN aborts when every edits directory fails at the flush
 * stage.
 */
@Test
public void testAllEditsDirsFailOnFlush() throws IOException {
  // perform one successful edit first so a synced transaction exists
  assertTrue(doAnEdit());
  // invalidate both edits dirs at the flush stage
  invalidateEditsDirAtIndex(0,true,false);
  invalidateEditsDirAtIndex(1,true,false);
  // the next edit cannot be flushed anywhere, so the NN must halt
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, " + "should have halted the NN");
  } catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage. " + "Unsynced transactions: 1",re);
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that edits keep succeeding while at least the configured minimum
 * number of redundant journals (2 of 4) is healthy, and that failing one
 * more halts the NN.
 */
@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush() throws IOException {
  // Restart the cluster with four name/edits dirs and a minimum of two.
  shutDownMiniCluster();
  Configuration conf=new HdfsConfiguration();
  String[] nameDirs=new String[4];
  for (int i=0; i < nameDirs.length; i++) {
    File nameDir=new File(PathUtils.getTestDir(getClass()),"name-dir" + i);
    nameDir.mkdirs();
    nameDirs[i]=nameDir.getAbsolutePath();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,StringUtils.join(nameDirs,","));
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,2);
  setUpMiniCluster(conf,false);
  // Fail dirs one at a time; edits succeed while >= 2 remain healthy.
  assertTrue(doAnEdit());
  invalidateEditsDirAtIndex(0,false,false);
  assertTrue(doAnEdit());
  invalidateEditsDirAtIndex(1,false,false);
  assertTrue(doAnEdit());
  // Failing a third dir drops below the minimum and must halt the NN.
  invalidateEditsDirAtIndex(2,false,false);
  try {
    doAnEdit();
    fail("A failure of more than the minimum number of redundant journals " + "should have halted ");
  } catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "setReadyToFlush failed for too many journals. " + "Unsynced transactions: 1",re);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogRace

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests rolling edit logs while transactions are ongoing.
 */
@Test
public void testEditLogRolling() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  // Fix: restore the stripped type argument; with a raw AtomicReference,
  // new RuntimeException(caughtErr.get()) receives an Object and does
  // not compile.
  AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    final FSNamesystem namesystem=cluster.getNamesystem();
    FSImage fsimage=namesystem.getFSImage();
    StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
    // background workers keep issuing edits while we roll the log
    startTransactionWorkers(namesystem,caughtErr);
    long previousLogTxId=1;
    for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch ( InterruptedException e) {
      }
      LOG.info("Starting roll " + i + ".");
      CheckpointSignature sig=namesystem.rollEditLog();
      long nextLog=sig.curSegmentTxId;
      // the finalized segment must contain exactly the txids up to the
      // start of the new segment
      String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
      previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
      assertEquals(previousLogTxId,nextLog);
      File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
      assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
 * This replicates the following manual test proposed by Konstantin:
 * I start the name-node in debugger.
 * I do -mkdir and stop the debugger in logSync() just before it does flush.
 * Then I enter safe mode with another client
 * I start saveNamepsace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create() ->
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then I let logSync() run.
 * Then I terminate the name-node.
 * After that the name-node wont start, since the edits file is broken.
 */
@Test
public void testSaveImageWhileSyncInProgress() throws Exception {
  Configuration conf=getConf();
  NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
  try {
    FSImage fsimage=namesystem.getFSImage();
    FSEditLog editLog=fsimage.getEditLog();
    JournalAndStream jas=editLog.getJournals().get(0);
    // Spy on the output stream so flush() can be intercepted below.
    EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
    jas.setCurrentStreamForTests(spyElos);
    final AtomicReference deferredException=new AtomicReference();
    final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
    // Thread that issues a single mkdirs; its logSync will be parked
    // inside flush() by the Answer below.
    final Thread doAnEditThread=new Thread(){
      @Override public void run(){
        try {
          LOG.info("Starting mkdirs");
          namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
          LOG.info("mkdirs complete");
        } catch ( Throwable ioe) {
          LOG.fatal("Got exception",ioe);
          deferredException.set(ioe);
          waitToEnterFlush.countDown();
        }
      }
    }
    ;
    // Blocks the edit thread inside flush() for BLOCK_TIME seconds,
    // signalling the main thread once the edit thread is in the
    // unsynchronized section of logSync.
    Answer blockingFlush=new Answer(){
      @Override public Void answer( InvocationOnMock invocation) throws Throwable {
        LOG.info("Flush called");
        if (Thread.currentThread() == doAnEditThread) {
          LOG.info("edit thread: Telling main thread we made it to flush section...");
          waitToEnterFlush.countDown();
          LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
          Thread.sleep(BLOCK_TIME * 1000);
          LOG.info("Going through to flush. This will allow the main thread to continue.");
        }
        invocation.callRealMethod();
        LOG.info("Flush complete");
        return null;
      }
    }
    ;
    doAnswer(blockingFlush).when(spyElos).flush();
    doAnEditThread.start();
    LOG.info("Main thread: waiting to enter flush...");
    waitToEnterFlush.await();
    assertNull(deferredException.get());
    LOG.info("Main thread: detected that logSync is in unsynchronized section.");
    LOG.info("Trying to enter safe mode.");
    LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
    long st=Time.now();
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    long et=Time.now();
    LOG.info("Entered safe mode");
    // entering safe mode must have waited out the blocked flush
    assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
    namesystem.saveNamespace();
    LOG.info("Joining on edit thread...");
    doAnEditThread.join();
    assertNull(deferredException.get());
    // txids 1-3 are finalized; a new in-progress segment starts at 4
    assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
    assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
  } finally {
    LOG.info("Closing namesystem");
    if (namesystem != null) namesystem.close();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Most of the FSNamesystem methods have a synchronized section where they * update the name system itself and write to the edit log, and then * unsynchronized, they call logSync. This test verifies that, if an * operation has written to the edit log but not yet synced it, * we wait for that sync before entering safe mode. */ @Test public void testSaveRightBeforeSync() throws Exception { Configuration conf=getConf(); NameNode.initMetrics(conf,NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf); try { FSImage fsimage=namesystem.getFSImage(); FSEditLog editLog=spy(fsimage.getEditLog()); fsimage.editLog=editLog; final AtomicReference deferredException=new AtomicReference(); final CountDownLatch waitToEnterSync=new CountDownLatch(1); final Thread doAnEditThread=new Thread(){ @Override public void run(){ try { LOG.info("Starting mkdirs"); namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true); LOG.info("mkdirs complete"); } catch ( Throwable ioe) { LOG.fatal("Got exception",ioe); deferredException.set(ioe); waitToEnterSync.countDown(); } } } ; Answer blockingSync=new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { LOG.info("logSync called"); if (Thread.currentThread() == doAnEditThread) { LOG.info("edit thread: Telling main thread we made it just before logSync..."); waitToEnterSync.countDown(); LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs"); Thread.sleep(BLOCK_TIME * 1000); LOG.info("Going through to logSync. 
This will allow the main thread to continue."); } invocation.callRealMethod(); LOG.info("logSync complete"); return null; } } ; doAnswer(blockingSync).when(editLog).logSync(); doAnEditThread.start(); LOG.info("Main thread: waiting to just before logSync..."); waitToEnterSync.await(); assertNull(deferredException.get()); LOG.info("Main thread: detected that logSync about to be called."); LOG.info("Trying to enter safe mode."); LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits"); long st=Time.now(); namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); long et=Time.now(); LOG.info("Entered safe mode"); assertTrue(et - st > (BLOCK_TIME - 1) * 1000); namesystem.saveNamespace(); LOG.info("Joining on edit thread..."); doAnEditThread.join(); assertNull(deferredException.get()); assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1)); assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4)); } finally { LOG.info("Closing namesystem"); if (namesystem != null) namesystem.close(); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestEditsDoubleBuffer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDoubleBuffer() throws IOException { EditsDoubleBuffer buf=new EditsDoubleBuffer(1024); assertTrue(buf.isFlushed()); byte[] data=new byte[100]; buf.writeRaw(data,0,data.length); assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes()); assertTrue("Writing to current buffer should not affect flush state",buf.isFlushed()); buf.setReadyToFlush(); assertEquals("Swapping buffers should still count buffered bytes",data.length,buf.countBufferedBytes()); assertFalse(buf.isFlushed()); DataOutputBuffer outBuf=new DataOutputBuffer(); buf.flushTo(outBuf); assertEquals(data.length,outBuf.getLength()); assertTrue(buf.isFlushed()); assertEquals(0,buf.countBufferedBytes()); buf.writeRaw(data,0,data.length); assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes()); buf.setReadyToFlush(); buf.flushTo(outBuf); assertEquals(data.length * 2,outBuf.getLength()); assertEquals(0,buf.countBufferedBytes()); outBuf.close(); }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSDirectory

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises error paths of FSDirectory.setINodeXAttrs: the same XAttr twice
 * in one call, CREATE on an already-set XAttr, and REPLACE on a missing
 * XAttr must all throw IOException. Then verifies successful CREATE of two
 * XAttrs, REPLACE of three SYSTEM-namespace XAttrs (values checked), and a
 * final combined CREATE|REPLACE batch of four.
 * NOTE(review): depends on the generatedXAttrs/fsdir fields and the
 * verifyXAttrsPresent helper defined elsewhere in this class.
 */
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception { List existingXAttrs=Lists.newArrayList(); List toAdd=Lists.newArrayList(); toAdd.add(generatedXAttrs.get(0)); toAdd.add(generatedXAttrs.get(1)); toAdd.add(generatedXAttrs.get(2)); toAdd.add(generatedXAttrs.get(0)); try { fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE)); fail("Specified the same xattr to be set twice"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e); } toAdd.remove(generatedXAttrs.get(0)); existingXAttrs.add(generatedXAttrs.get(0)); try { fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE)); fail("Set XAttr that is already set without REPLACE flag"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("already exists",e); } try { fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE)); fail("Set XAttr that does not exist without the CREATE flag"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("does not exist",e); } toAdd.remove(generatedXAttrs.get(0)); List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE)); assertEquals("Unexpected toAdd size",2,toAdd.size()); for ( XAttr x : toAdd) { assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x)); } existingXAttrs=newXAttrs; toAdd=Lists.newArrayList(); for (int i=0; i < 3; i++) { XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build(); toAdd.add(xAttr); } newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE)); assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size()); for (int i=0; i < 3; i++) { assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue()); } existingXAttrs=newXAttrs; toAdd=Lists.newArrayList(); for (int i=0; i < 4; i++) { 
toAdd.add(generatedXAttrs.get(i)); } newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); verifyXAttrsPresent(newXAttrs,4); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testINodeXAttrsLimit() throws Exception { List existingXAttrs=Lists.newArrayListWithCapacity(2); XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31,0x32,0x33}).build(); XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(new byte[]{0x31,0x31,0x31}).build(); existingXAttrs.add(xAttr1); existingXAttrs.add(xAttr2); XAttr newSystemXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build(); XAttr newRawXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build(); List newXAttrs=Lists.newArrayListWithCapacity(2); newXAttrs.add(newSystemXAttr); newXAttrs.add(newRawXAttr); List xAttrs=fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); assertEquals(xAttrs.size(),4); XAttr newXAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.TRUSTED).setName("a4").setValue(new byte[]{0x34,0x34,0x34}).build(); newXAttrs.set(0,newXAttr1); try { fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); fail("Setting user visible xattr on inode should fail if " + "reaching limit."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " + "to inode, would exceed limit",e); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testDisplayRecentEditLogOpCodes() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; FileSystem fileSys=null; cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).build(); cluster.waitActive(); fileSys=cluster.getFileSystem(); final FSNamesystem namesystem=cluster.getNamesystem(); FSImage fsimage=namesystem.getFSImage(); for (int i=0; i < 20; i++) { fileSys.mkdirs(new Path("/tmp/tmp" + i)); } StorageDirectory sd=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next(); cluster.shutdown(); File editFile=FSImageTestUtil.findLatestEditsLog(sd).getFile(); assertTrue("Should exist: " + editFile,editFile.exists()); long fileLen=editFile.length(); RandomAccessFile rwf=new RandomAccessFile(editFile,"rw"); rwf.seek(fileLen - 40); for (int i=0; i < 20; i++) { rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode()); } rwf.close(); StringBuilder bld=new StringBuilder(); bld.append("^Error replaying edit log at offset \\d+. "); bld.append("Expected transaction ID was \\d+\n"); bld.append("Recent opcode offsets: (\\d+\\s*){4}$"); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).format(false).build(); fail("should not be able to start"); } catch ( IOException e) { assertTrue("error message contains opcodes message",e.getMessage().matches(bld.toString())); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testValidateEmptyEditLog() throws IOException { File testDir=new File(TEST_DIR,"testValidateEmptyEditLog"); SortedMap offsetToTxId=Maps.newTreeMap(); File logFile=prepareUnfinalizedTestEditLog(testDir,0,offsetToTxId); truncateFile(logFile,8); EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile); assertTrue(!validation.hasCorruptHeader()); assertEquals(HdfsConstants.INVALID_TXID,validation.getEndTxId()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testValidateEditLogWithCorruptBody() throws IOException { File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptBody"); SortedMap offsetToTxId=Maps.newTreeMap(); final int NUM_TXNS=20; File logFile=prepareUnfinalizedTestEditLog(testDir,NUM_TXNS,offsetToTxId); File logFileBak=new File(testDir,logFile.getName() + ".bak"); Files.copy(logFile,logFileBak); EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile); assertTrue(!validation.hasCorruptHeader()); assertEquals(NUM_TXNS + 1,validation.getEndTxId()); for ( Map.Entry entry : offsetToTxId.entrySet()) { long txOffset=entry.getKey(); long txId=entry.getValue(); Files.copy(logFileBak,logFile); corruptByteInFile(logFile,txOffset); validation=EditLogFileInputStream.validateEditLog(logFile); long expectedEndTxId=(txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1); assertEquals("Failed when corrupting txn opcode at " + txOffset,expectedEndTxId,validation.getEndTxId()); assertTrue(!validation.hasCorruptHeader()); } for ( Map.Entry entry : offsetToTxId.entrySet()) { long txOffset=entry.getKey(); long txId=entry.getValue(); Files.copy(logFileBak,logFile); truncateFile(logFile,txOffset); validation=EditLogFileInputStream.validateEditLog(logFile); long expectedEndTxId=(txId == 0) ? HdfsConstants.INVALID_TXID : (txId - 1); assertEquals("Failed when corrupting txid " + txId + " txn opcode "+ "at "+ txOffset,expectedEndTxId,validation.getEndTxId()); assertTrue(!validation.hasCorruptHeader()); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSImageStorageInspector

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** * Simple test with image, edits, and inprogress edits */ @Test public void testCurrentStorageInspector() throws IOException { FSImageTransactionalStorageInspector inspector=new FSImageTransactionalStorageInspector(); StorageDirectory mockDir=FSImageTestUtil.mockStorageDirectory(NameNodeDirType.IMAGE_AND_EDITS,false,"/foo/current/" + getImageFileName(123),"/foo/current/" + getFinalizedEditsFileName(123,456),"/foo/current/" + getImageFileName(456),"/foo/current/" + getInProgressEditsFileName(457)); inspector.inspectDirectory(mockDir); assertEquals(2,inspector.foundImages.size()); FSImageFile latestImage=inspector.getLatestImages().get(0); assertEquals(456,latestImage.txId); assertSame(mockDir,latestImage.sd); assertTrue(inspector.isUpgradeFinalized()); assertEquals(new File("/foo/current/" + getImageFileName(456)),latestImage.getFile()); }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSImageWithSnapshot

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * Test when there is snapshot taken on root */ @Test public void testSnapshotOnRoot() throws Exception { final Path root=new Path("/"); hdfs.allowSnapshot(root); hdfs.createSnapshot(root,"s1"); cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build(); cluster.waitActive(); fsn=cluster.getNamesystem(); hdfs=cluster.getFileSystem(); hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); hdfs.saveNamespace(); hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build(); cluster.waitActive(); fsn=cluster.getNamesystem(); hdfs=cluster.getFileSystem(); INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory(); assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty()); List diffList=rootNode.getDiffs().asList(); assertEquals(1,diffList.size()); Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1")); assertEquals(s1.getId(),diffList.get(0).getSnapshotId()); assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs()); SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null); assertEquals(root,sdirs[0].getFullPath()); hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); hdfs.saveNamespace(); hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build(); cluster.waitActive(); fsn=cluster.getNamesystem(); hdfs=cluster.getFileSystem(); }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSNamesystem

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testFSNamesystemLockCompatibility(){ FSNamesystemLock rwLock=new FSNamesystemLock(true); assertEquals(0,rwLock.getReadHoldCount()); rwLock.readLock().lock(); assertEquals(1,rwLock.getReadHoldCount()); rwLock.readLock().lock(); assertEquals(2,rwLock.getReadHoldCount()); rwLock.readLock().unlock(); assertEquals(1,rwLock.getReadHoldCount()); rwLock.readLock().unlock(); assertEquals(0,rwLock.getReadHoldCount()); assertFalse(rwLock.isWriteLockedByCurrentThread()); assertEquals(0,rwLock.getWriteHoldCount()); rwLock.writeLock().lock(); assertTrue(rwLock.isWriteLockedByCurrentThread()); assertEquals(1,rwLock.getWriteHoldCount()); rwLock.writeLock().lock(); assertTrue(rwLock.isWriteLockedByCurrentThread()); assertEquals(2,rwLock.getWriteHoldCount()); rwLock.writeLock().unlock(); assertTrue(rwLock.isWriteLockedByCurrentThread()); assertEquals(1,rwLock.getWriteHoldCount()); rwLock.writeLock().unlock(); assertFalse(rwLock.isWriteLockedByCurrentThread()); assertEquals(0,rwLock.getWriteHoldCount()); }

Class: org.apache.hadoop.hdfs.server.namenode.TestFSNamesystemMBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test public void test() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); FSNamesystem fsn=cluster.getNameNode().namesystem; MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState"); String snapshotStats=(String)(mbs.getAttribute(mxbeanName,"SnapshotStats")); @SuppressWarnings("unchecked") Map stat=(Map)JSON.parse(snapshotStats); assertTrue(stat.containsKey("SnapshottableDirectories") && (Long)stat.get("SnapshottableDirectories") == fsn.getNumSnapshottableDirs()); assertTrue(stat.containsKey("Snapshots") && (Long)stat.get("Snapshots") == fsn.getNumSnapshots()); Object pendingDeletionBlocks=mbs.getAttribute(mxbeanName,"PendingDeletionBlocks"); assertNotNull(pendingDeletionBlocks); assertTrue(pendingDeletionBlocks instanceof Long); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestFavoredNodesEndToEnd

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test(timeout=180000) public void testWhenSomeNodesAreNotGood() throws Exception { final InetSocketAddress addrs[]=new InetSocketAddress[4]; final String[] hosts=new String[addrs.length]; for (int i=0; i < addrs.length; i++) { addrs[i]=datanodes.get(i).getXferAddress(); hosts[i]=addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort(); } DatanodeInfo d=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeByXferAddr(addrs[0].getAddress().getHostAddress(),addrs[0].getPort()); d.setDecommissioned(); Path p=new Path("/filename-foo-bar-baz"); final short replication=(short)3; FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,replication,4096L,null,addrs); out.write(SOME_BYTES); out.close(); d.stopDecommission(); BlockLocation[] locations=getBlockLocations(p); Assert.assertEquals(replication,locations[0].getNames().length); ; for (int i=0; i < replication; i++) { final String loc=locations[0].getNames()[i]; int j=0; for (; j < hosts.length && !loc.equals(hosts[j]); j++) ; Assert.assertTrue("j=" + j,j > 0); Assert.assertTrue("loc=" + loc + " not in host list "+ Arrays.asList(hosts)+ ", j="+ j,j < hosts.length); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestFileJournalManager

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Make sure that in-progress streams aren't counted if we don't ask for * them. */ @Test public void testExcludeInProgressStreams() throws CorruptionException, IOException { File f=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams"); NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,false); StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); assertEquals(100,getNumberOfTransactions(jm,1,false,false)); EditLogInputStream elis=getJournalInputStream(jm,90,false); try { FSEditLogOp lastReadOp=null; while ((lastReadOp=elis.readOp()) != null) { assertTrue(lastReadOp.getTransactionId() <= 100); } } finally { IOUtils.cleanup(LOG,elis); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that we receive the correct number of transactions when we count * the number of transactions around gaps. * Set up a single edits directory, with no failures. Delete the 4th logfile. * Test that getNumberOfTransactions returns the correct number of * transactions before this gap and after this gap. Also verify that if you * try to count on the gap that an exception is thrown. */ @Test public void testManyLogsWithGaps() throws IOException { File f=new File(TestEditLog.TEST_DIR + "/manylogswithgaps"); NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10); StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next(); final long startGapTxId=3 * TXNS_PER_ROLL + 1; final long endGapTxId=4 * TXNS_PER_ROLL; File[] files=new File(f,"current").listFiles(new FilenameFilter(){ @Override public boolean accept( File dir, String name){ if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) { return true; } return false; } } ); assertEquals(1,files.length); assertTrue(files[0].delete()); FileJournalManager jm=new FileJournalManager(conf,sd,storage); assertEquals(startGapTxId - 1,getNumberOfTransactions(jm,1,true,true)); assertEquals(0,getNumberOfTransactions(jm,startGapTxId,true,true)); assertEquals(11 * TXNS_PER_ROLL - endGapTxId,getNumberOfTransactions(jm,endGapTxId + 1,true,true)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier 
@Test(expected=IllegalStateException.class) public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException { File f=new File(TestEditLog.TEST_DIR + "/filejournaltestError"); NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0)); StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); String sdRootPath=sd.getRoot().getAbsolutePath(); FileUtil.chmod(sdRootPath,"-w",true); try { jm.finalizeLogSegment(0,1); } finally { FileUtil.chmod(sdRootPath,"+w",true); assertTrue(storage.getRemovedStorageDirs().contains(sd)); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestFsck

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test if fsck can return -1 in case of failure * @throws Exception */ @Test public void testFsckError() throws Exception { MiniDFSCluster cluster=null; try { Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).build(); String fileName="/test.txt"; Path filePath=new Path(fileName); FileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,filePath,1L,(short)1,1L); DFSTestUtil.waitReplication(fs,filePath,(short)1); INodeFile node=(INodeFile)cluster.getNamesystem().dir.getNode(fileName,true); final BlockInfo[] blocks=node.getBlocks(); assertEquals(blocks.length,1); blocks[0].setNumBytes(-1L); String outStr=runFsck(conf,-1,true,fileName); System.out.println(outStr); assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS)); fs.delete(filePath,true); } finally { if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier BooleanVerifier HybridVerifier 
/** * Test fsck with FileNotFound */ @Test public void testFsckFileNotFound() throws Exception { final short NUM_REPLICAS=1; Configuration conf=new Configuration(); NameNode namenode=mock(NameNode.class); NetworkTopology nettop=mock(NetworkTopology.class); Map pmap=new HashMap(); Writer result=new StringWriter(); PrintWriter out=new PrintWriter(result,true); InetAddress remoteAddress=InetAddress.getLocalHost(); FSNamesystem fsName=mock(FSNamesystem.class); BlockManager blockManager=mock(BlockManager.class); DatanodeManager dnManager=mock(DatanodeManager.class); when(namenode.getNamesystem()).thenReturn(fsName); when(fsName.getBlockLocations(anyString(),anyLong(),anyLong(),anyBoolean(),anyBoolean(),anyBoolean())).thenThrow(new FileNotFoundException()); when(fsName.getBlockManager()).thenReturn(blockManager); when(blockManager.getDatanodeManager()).thenReturn(dnManager); NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_REPLICAS,(short)1,remoteAddress); String pathString="/tmp/testFile"; long length=123L; boolean isDir=false; int blockReplication=1; long blockSize=128 * 1024L; long modTime=123123123L; long accessTime=123123120L; FsPermission perms=FsPermission.getDefault(); String owner="foo"; String group="bar"; byte[] symlink=null; byte[] path=new byte[128]; path=DFSUtil.string2Bytes(pathString); long fileId=312321L; int numChildren=1; HdfsFileStatus file=new HdfsFileStatus(length,isDir,blockReplication,blockSize,modTime,accessTime,perms,owner,group,symlink,path,fileId,numChildren,null); Result res=new Result(conf); try { fsck.check(pathString,file,res); } catch ( Exception e) { fail("Unexpected exception " + e.getMessage()); } assertTrue(res.toString().contains("HEALTHY")); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Tests that the # of missing block replicas and expected replicas is correct * @throws IOException */ @Test public void testFsckMissingReplicas() throws IOException { final short REPL_FACTOR=2; final short NUM_REPLICAS=1; final short NUM_BLOCKS=3; final long blockSize=512; Configuration conf=new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize); MiniDFSCluster cluster=null; DistributedFileSystem dfs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build(); assertNotNull("Failed Cluster Creation",cluster); cluster.waitClusterUp(); dfs=cluster.getFileSystem(); assertNotNull("Failed to get FileSystem",dfs); final String pathString=new String("/testfile"); final Path path=new Path(pathString); long fileLen=blockSize * NUM_BLOCKS; DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1); NameNode namenode=cluster.getNameNode(); NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology(); Map pmap=new HashMap(); Writer result=new StringWriter(); PrintWriter out=new PrintWriter(result,true); InetAddress remoteAddress=InetAddress.getLocalHost(); NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_REPLICAS,(short)1,remoteAddress); final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString); assertNotNull(file); Result res=new Result(conf); fsck.check(pathString,file,res); System.out.println(result.toString()); assertEquals(res.missingReplicas,(NUM_BLOCKS * REPL_FACTOR) - (NUM_BLOCKS * NUM_REPLICAS)); assertEquals(res.numExpectedReplicas,NUM_BLOCKS * REPL_FACTOR); } finally { if (dfs != null) { dfs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * do fsck */ @Test public void testFsck() throws Exception { DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build(); MiniDFSCluster cluster=null; FileSystem fs=null; try { Configuration conf=new HdfsConfiguration(); final long precision=1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); fs=cluster.getFileSystem(); final String fileName="/srcdat"; util.createFiles(fs,fileName); util.waitReplication(fs,fileName,(short)3); final Path file=new Path(fileName); long aTime=fs.getFileStatus(file).getAccessTime(); Thread.sleep(precision); setupAuditLogs(); String outStr=runFsck(conf,0,true,"/"); verifyAuditLogs(); assertEquals(aTime,fs.getFileStatus(file).getAccessTime()); System.out.println(outStr); assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); if (fs != null) { try { fs.close(); } catch ( Exception e) { } } cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build(); outStr=runFsck(conf,1,true,"/"); assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS)); System.out.println(outStr); cluster.startDataNodes(conf,4,true,null,null); cluster.waitActive(); cluster.waitClusterUp(); fs=cluster.getFileSystem(); util.cleanup(fs,"/srcdat"); } finally { if (fs != null) { try { fs.close(); } catch ( Exception e) { } } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Tests that the # of misreplaced replicas is correct * @throws IOException */ @Test public void testFsckMisPlacedReplicas() throws IOException { final short REPL_FACTOR=2; short NUM_DN=2; final short NUM_BLOCKS=3; final long blockSize=512; String[] racks={"/rack1","/rack1"}; String[] hosts={"host1","host2"}; Configuration conf=new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize); MiniDFSCluster cluster=null; DistributedFileSystem dfs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build(); assertNotNull("Failed Cluster Creation",cluster); cluster.waitClusterUp(); dfs=cluster.getFileSystem(); assertNotNull("Failed to get FileSystem",dfs); final String pathString=new String("/testfile"); final Path path=new Path(pathString); long fileLen=blockSize * NUM_BLOCKS; DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1); NameNode namenode=cluster.getNameNode(); NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology(); nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2","/host3")); NUM_DN++; Map pmap=new HashMap(); Writer result=new StringWriter(); PrintWriter out=new PrintWriter(result,true); InetAddress remoteAddress=InetAddress.getLocalHost(); NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_DN,REPL_FACTOR,remoteAddress); final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString); assertNotNull(file); Result res=new Result(conf); fsck.check(pathString,file,res); assertEquals(res.numMisReplicatedBlocks,NUM_BLOCKS); } finally { if (dfs != null) { dfs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test fsck with symlinks in the filesystem */ @Test public void testFsckSymlink() throws Exception { final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build(); final Configuration conf=new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L); MiniDFSCluster cluster=null; FileSystem fs=null; try { final long precision=1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); fs=cluster.getFileSystem(); final String fileName="/srcdat"; util.createFiles(fs,fileName); final FileContext fc=FileContext.getFileContext(cluster.getConfiguration(0)); final Path file=new Path(fileName); final Path symlink=new Path("/srcdat-symlink"); fc.createSymlink(file,symlink,false); util.waitReplication(fs,fileName,(short)3); long aTime=fc.getFileStatus(symlink).getAccessTime(); Thread.sleep(precision); setupAuditLogs(); String outStr=runFsck(conf,0,true,"/"); verifyAuditLogs(); assertEquals(aTime,fc.getFileStatus(symlink).getAccessTime()); System.out.println(outStr); assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); assertTrue(outStr.contains("Total symlinks:\t\t1")); util.cleanup(fs,fileName); } finally { if (fs != null) { try { fs.close(); } catch ( Exception e) { } } if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test that a dummy implementation of JournalManager can * be initialized on startup */ @Test public void testDummyJournalManager() throws Exception { MiniDFSCluster cluster=null; Configuration conf=new Configuration(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",DummyJournalManager.class.getName()); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,DUMMY_URI); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); assertTrue(DummyJournalManager.shouldPromptCalled); assertTrue(DummyJournalManager.formatCalled); assertNotNull(DummyJournalManager.conf); assertEquals(new URI(DUMMY_URI),DummyJournalManager.uri); assertNotNull(DummyJournalManager.nsInfo); assertEquals(DummyJournalManager.nsInfo.getClusterID(),cluster.getNameNode().getNamesystem().getClusterId()); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestHDFSConcat

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that the concat operation is properly persisted in the * edit log, and properly replayed on restart. */ @Test public void testConcatInEditLog() throws Exception { final Path TEST_DIR=new Path("/testConcatInEditLog"); final long FILE_LEN=blockSize; Path[] srcFiles=new Path[3]; for (int i=0; i < srcFiles.length; i++) { Path path=new Path(TEST_DIR,"src-" + i); DFSTestUtil.createFile(dfs,path,FILE_LEN,REPL_FACTOR,1); srcFiles[i]=path; } Path targetFile=new Path(TEST_DIR,"target"); DFSTestUtil.createFile(dfs,targetFile,FILE_LEN,REPL_FACTOR,1); dfs.concat(targetFile,srcFiles); assertTrue(dfs.exists(targetFile)); FileStatus origStatus=dfs.getFileStatus(targetFile); cluster.restartNameNode(true); assertTrue(dfs.exists(targetFile)); assertFalse(dfs.exists(srcFiles[0])); FileStatus statusAfterRestart=dfs.getFileStatus(targetFile); assertEquals(origStatus.getModificationTime(),statusAfterRestart.getModificationTime()); }

IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Concatenates 10 files into one * Verifies the final size, deletion of the file, number of blocks * @throws IOException */ @Test public void testConcat() throws IOException, InterruptedException { final int numFiles=10; long fileLen=blockSize * 3; HdfsFileStatus fStatus; FSDataInputStream stm; String trg=new String("/trg"); Path trgPath=new Path(trg); DFSTestUtil.createFile(dfs,trgPath,fileLen,REPL_FACTOR,1); fStatus=nn.getFileInfo(trg); long trgLen=fStatus.getLen(); long trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); Path[] files=new Path[numFiles]; byte[][] bytes=new byte[numFiles][(int)fileLen]; LocatedBlocks[] lblocks=new LocatedBlocks[numFiles]; long[] lens=new long[numFiles]; int i=0; for (i=0; i < files.length; i++) { files[i]=new Path("/file" + i); Path path=files[i]; System.out.println("Creating file " + path); DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1); fStatus=nn.getFileInfo(path.toUri().getPath()); lens[i]=fStatus.getLen(); assertEquals(trgLen,lens[i]); lblocks[i]=nn.getBlockLocations(path.toUri().getPath(),0,lens[i]); stm=dfs.open(path); stm.readFully(0,bytes[i]); stm.close(); } final UserGroupInformation user1=UserGroupInformation.createUserForTesting("theDoctor",new String[]{"tardis"}); DistributedFileSystem hdfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1,conf); try { hdfs.concat(trgPath,files); fail("Permission exception expected"); } catch ( IOException ie) { System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage()); } ContentSummary cBefore=dfs.getContentSummary(trgPath.getParent()); dfs.concat(trgPath,files); ContentSummary cAfter=dfs.getContentSummary(trgPath.getParent()); assertEquals(cBefore.getFileCount(),cAfter.getFileCount() + files.length); long totalLen=trgLen; long totalBlocks=trgBlocks; for (i=0; i < files.length; i++) { totalLen+=lens[i]; totalBlocks+=lblocks[i].locatedBlockCount(); } System.out.println("total len=" + totalLen + "; totalBlocks="+ 
totalBlocks); fStatus=nn.getFileInfo(trg); trgLen=fStatus.getLen(); stm=dfs.open(trgPath); byte[] byteFileConcat=new byte[(int)trgLen]; stm.readFully(0,byteFileConcat); stm.close(); trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); assertEquals(trgBlocks,totalBlocks); assertEquals(trgLen,totalLen); for ( Path p : files) { fStatus=nn.getFileInfo(p.toUri().getPath()); assertNull("File " + p + " still exists",fStatus); DFSTestUtil.createFile(dfs,p,fileLen,REPL_FACTOR,1); } checkFileContent(byteFileConcat,bytes); Path smallFile=new Path("/sfile"); int sFileLen=10; DFSTestUtil.createFile(dfs,smallFile,sFileLen,REPL_FACTOR,1); dfs.concat(trgPath,new Path[]{smallFile}); fStatus=nn.getFileInfo(trg); trgLen=fStatus.getLen(); trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); assertEquals(trgBlocks,totalBlocks + 1); assertEquals(trgLen,totalLen + sFileLen); }

TestInitializer InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Spins up a fresh mini cluster before each test and caches the handles
 * (cluster, filesystem, namenode RPC) the tests rely on.
 */
@Before
public void startUpCluster() throws IOException {
  // Build and wait for the cluster, failing fast if any handle is missing.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();

  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  nn = cluster.getNameNodeRpc();
  assertNotNull("Failed to get NameNode", nn);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Concatenates a source file whose last block is not full onto a target
 * and verifies the resulting length, block count, content, and that the
 * source file no longer exists.
 */
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize * 3;
  long srcFileLen = blockSize * 3 + 20; // last block is not full

  // Create the target file and check its length.
  String name1 = "/trg", name2 = "/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  assertEquals(fileLen, trgFileLen);

  // Read the target's bytes for the final content check.
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int) trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();

  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);

  // Create the source file and check its length.
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);

  // Read the source's bytes for the final content check.
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int) srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();

  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);

  System.out.println("trg len=" + trgFileLen + "; src len=" + srcFileLen);

  // Concatenate: the source's blocks are moved onto the target.
  dfs.concat(filePath1, new Path[] {filePath2});

  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();

  // Read the resulting file.
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int) fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();

  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);

  // All blocks of both files now belong to the target.
  assertEquals(lbConcat.locatedBlockCount(),
      lb1.locatedBlockCount() + lb2.locatedBlockCount());

  // The length is the sum of the two file lengths.
  System.out.println("file1 len=" + fileLen + "; total len=" + totalLen);
  assertEquals(fileLen, totalLen);

  // The source file must be gone.
  // Fixed message: was "...still exists" with no separating space.
  fStatus = nn.getFileInfo(name2);
  assertNull("File " + name2 + " still exists", fStatus);

  // The content is file1's bytes followed by file2's bytes.
  checkFileContent(byteFileConcat, new byte[][] {byteFile1, byteFile2});
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises the illegal-argument cases of concat: source in a different
 * directory than the target, a relative source path, and an empty source
 * list. Each case is expected to throw.
 */
@Test
public void testIllegalArg() throws IOException {
  long fileLen = blockSize * 3;

  Path parentDir = new Path("/parentTrg");
  assertTrue(dfs.mkdirs(parentDir));
  Path trg = new Path(parentDir, "trg");
  DFSTestUtil.createFile(dfs, trg, fileLen, REPL_FACTOR, 1);

  // Case 1: the source lives in a different directory than the target.
  {
    Path otherDir = new Path("/dir1");
    assertTrue(dfs.mkdirs(otherDir));
    Path src = new Path(otherDir, "src");
    DFSTestUtil.createFile(dfs, src, fileLen, REPL_FACTOR, 1);
    try {
      dfs.concat(trg, new Path[] {src});
      fail("didn't fail for src and trg in different directories");
    } catch (Exception e) {
      // expected
    }
  }

  // Case 2: a source given as a relative path.
  try {
    dfs.concat(trg, new Path[] {new Path("test1/a")});
    fail("didn't fail with invalid arguments");
  } catch (Exception e) {
    // expected
  }

  // Case 3: an empty array of sources.
  try {
    dfs.concat(trg, new Path[] {});
    fail("didn't fail with invalid arguments");
  } catch (Exception e) {
    // expected
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestINodeFile

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies getListing with a startAfter token: a plain file name, an
 * inode-path token, and a token referring to a deleted file (which must
 * raise DirectoryListingStartAfterNotFoundException).
 */
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    // Full listing returns all three files.
    DirectoryListing dl = cluster.getNameNodeRpc()
        .getListing("/tmp", HdfsFileStatus.EMPTY_NAME, false);
    assertEquals(3, dl.getPartialListing().length); // was assertTrue(x == 3)

    // startAfter given as a plain file name.
    String f2 = "f2"; // fixed: was new String("f2") - needless allocation
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);

    // startAfter given as an inode path.
    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);

    // Delete f2; listing with the stale inode-path token must now fail.
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Checks the under-construction state transitions of an INodeFile. */
@Test
public void testFileUnderConstruction() {
  replication = 3;
  final INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm,
      0L, 0L, null, replication, 1024L);
  // A freshly created inode is not under construction.
  assertFalse(inf.isUnderConstruction());

  // Switching to under-construction records the lease holder details.
  final String clientName = "client";
  final String clientMachine = "machine";
  inf.toUnderConstruction(clientName, clientMachine);
  assertTrue(inf.isUnderConstruction());
  FileUnderConstructionFeature ucFeature = inf.getFileUnderConstructionFeature();
  assertEquals(clientName, ucFeature.getClientName());
  assertEquals(clientMachine, ucFeature.getClientMachine());

  // Completing the file clears the under-construction state.
  inf.toCompleteFile(Time.now());
  assertFalse(inf.isUnderConstruction());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for addressing files using /.reserved/.inodes/ in file system
 * operations.
 */
@Test
public void testInodeIdBasedPaths() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    NamenodeProtocols nnRpc = cluster.getNameNodeRpc();

    // A directory addressed both via its inode path and its regular path.
    Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
    Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
    fs.mkdirs(baseDir);
    // NOTE(review): return value of exists() ignored here - presumably only
    // exercising the call on an inode path; confirm.
    fs.exists(baseDir);
    long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();

    // A file created and manipulated solely through its inode path.
    Path testFileInodePath = getInodePath(baseDirFileId, "test1");
    Path testFileRegularPath = new Path(baseDir, "test1");
    final int testFileBlockSize = 1024;
    FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
    assertTrue(fs.exists(testFileInodePath));

    // setPermission / getFileStatus via the inode path.
    FsPermission perm = new FsPermission((short) 0666);
    fs.setPermission(testFileInodePath, perm);
    FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(perm, fileStatus.getPermission());

    // setOwner / setTimes via the inode path.
    fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
    fs.setTimes(testFileInodePath, 0, 0);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(0, fileStatus.getModificationTime());
    assertEquals(0, fileStatus.getAccessTime());

    // setReplication and preferred block size via the inode path.
    fs.setReplication(testFileInodePath, (short) 3);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(3, fileStatus.getReplication());
    fs.setReplication(testFileInodePath, (short) 1);
    assertEquals(testFileBlockSize,
        nnRpc.getPreferredBlockSize(testFileInodePath.toString()));

    // Read-only operations that must simply not throw on an inode path.
    {
      fs.isFileClosed(testFileInodePath);
      fs.getAclStatus(testFileInodePath);
      fs.getXAttrs(testFileInodePath);
      fs.listXAttrs(testFileInodePath);
      fs.access(testFileInodePath, FsAction.READ_WRITE);
    }

    // Symlink targets under an inode-path base directory.
    String invalidTarget = new Path(baseDir, "invalidTarget").toString();
    String link = new Path(baseDir, "link").toString();
    testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
    String validTarget = "/validtarget";
    testValidSymlinkTarget(nnRpc, validTarget, link);

    // append opens a lease; recoverLease then reclaims it.
    // NOTE(review): the stream returned by append() is never closed -
    // presumably deliberate so recoverLease has a lease to recover; confirm.
    fs.append(testFileInodePath);
    fs.recoverLease(testFileInodePath);

    // Block locations must match between inode path and regular path.
    LocatedBlocks l1 =
        nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
    LocatedBlocks l2 =
        nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
    checkEquals(l1, l2);

    // Rename back and forth (with and without OVERWRITE) via inode paths;
    // the file status must be unchanged afterwards.
    Path renameDst = getInodePath(baseDirFileId, "test2");
    fileStatus = fs.getFileStatus(testFileInodePath);
    fs.rename(testFileInodePath, renameDst);
    fs.rename(renameDst, testFileInodePath);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
    fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));

    // Content summary and listings agree between the two addressing modes.
    assertEquals(fs.getContentSummary(testFileRegularPath).toString(),
        fs.getContentSummary(testFileInodePath).toString());
    checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));

    // Delete via the inode path.
    fs.delete(testFileInodePath, true);
    assertFalse(fs.exists(testFileInodePath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test
public void testInodeId() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    long lastId = fsn.getLastInodeId();

    // Initial state: only the root inode exists.
    int inodeCount = 1;
    long expectedLastInodeId = INodeId.ROOT_INODE_ID;
    assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
    assertEquals(expectedLastInodeId, lastId);
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // mkdir allocates one inode id.
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());

    // Creating a file allocates one more; getFileInfo reports its id.
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
    assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
    HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
    assertEquals(expectedLastInodeId, fileStatus.getFileId());

    // Rename keeps both the id counter and the inode map size unchanged.
    Path renamedPath = new Path("/test2");
    assertTrue(fs.rename(path, renamedPath));
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Delete removes both inodes (dir + file) from the map.
    assertTrue(fs.delete(renamedPath, true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Two new files plus their recreated parent dir: three new ids.
    String file1 = "/test1/file1";
    String file2 = "/test1/file2";
    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
    inodeCount += 3;
    expectedLastInodeId += 3;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());

    // concat removes the source inode without allocating a new id.
    nnrpc.concat(file2, new String[] {file1});
    inodeCount--;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());

    // Deleting the directory tree removes its two remaining inodes.
    assertTrue(fs.delete(new Path("/test1"), true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // The counter and map must survive a namenode restart.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // New file plus its new parent directory: two more ids.
    DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // A file still under construction also consumes ids (dir + file).
    FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
    assertTrue(outStream != null);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Checkpoint while the file is open, then close and restart: the
    // counter and map must be restored from the saved image.
    fsn.enterSafeMode(false);
    fsn.saveNamespace();
    fsn.leaveSafeMode();
    outStream.close();
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Writing to a stream whose file has been deleted out from under it must
 * fail on write/hflush. Also closes the output stream in the finally
 * block (the original leaked it).
 */
@Test(timeout = 120000)
public void testWriteToDeletedFile() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  Path path = new Path("/test1");
  assertTrue(fs.mkdirs(path));

  int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  byte[] data = new byte[size];

  // Open a stream, then delete the file out from under it.
  Path filePath = new Path("/test1/file");
  FSDataOutputStream fos = fs.create(filePath);
  fs.delete(filePath, false);
  try {
    fos.write(data, 0, data.length);
    fos.hflush();
    fail("Write should fail after delete");
  } catch (Exception e) {
    // expected: the file is gone
  } finally {
    // Resource-leak fix: best-effort close of the (now broken) stream.
    try {
      fos.close();
    } catch (IOException ignored) {
      // the stream is expected to be unusable after the delete
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for {@link FSDirectory#resolvePath(String,byte[][],FSDirectory)}.
 * BUGFIX: the trailing-slash case previously asserted against a stale
 * resolvedPath because its resolvePath call was missing.
 */
@Test
public void testInodePath() throws IOException {
  // An inode tree where the mocked FSDirectory maps any id to /a/b/c.
  String path = "/a/b/c";
  INode inode = createTreeOfInodes(path);
  FSDirectory fsd = Mockito.mock(FSDirectory.class);
  Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());

  // A non-reserved path resolves to itself.
  assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));

  byte[][] components = INode.getPathComponents(path);
  String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // An inode path resolves to the inode's full path.
  components = INode.getPathComponents("/.reserved/.inodes/1");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // Same with a trailing slash. BUGFIX: the resolvePath call below was
  // missing, so the assert only re-checked the previous case's value.
  components = INode.getPathComponents("/.reserved/.inodes/1/");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // An inode path with a remainder resolves relative to the inode.
  components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals("/a/b/c/d/e/f", resolvedPath);

  // "/.reserved/.inodes" with no id is returned unchanged.
  String testPath = "/.reserved/.inodes";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // The root inode id resolves to "/".
  testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals("/", resolvedPath);

  // A path not under /.reserved is returned unchanged.
  testPath = "/.invalid/.inodes/1";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // An unknown inode id must raise FileNotFoundException.
  Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
  testPath = "/.reserved/.inodes/1234";
  components = INode.getPathComponents(testPath);
  try {
    String realPath = FSDirectory.resolvePath(testPath, components, fsd);
    fail("Path should not be resolved:" + realPath);
  } catch (IOException e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test for the static {@link INodeFile#valueOf(INode,String)} and
 * {@link INodeFileUnderConstruction#valueOf(INode,String)} methods.
 * @throws IOException
 */
@Test
public void testValueOf() throws IOException {
  final String path = "/testValueOf";
  final short replication = 3;

  // Case 1: valueOf(null) must fail for both file and directory.
  {
    final INode from = null;
    try {
      INodeFile.valueOf(from, path);
      fail();
    } catch (FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("File does not exist"));
    }
    try {
      INodeDirectory.valueOf(from, path);
      fail();
    } catch (FileNotFoundException e) {
      assertTrue(e.getMessage().contains("Directory does not exist"));
    }
  }

  // Case 2: a plain file is returned as-is by INodeFile.valueOf and
  // rejected by INodeDirectory.valueOf.
  {
    final INode from = createINodeFile(replication, preferredBlockSize);
    final INodeFile f = INodeFile.valueOf(from, path);
    assertTrue(f == from);
    try {
      INodeDirectory.valueOf(from, path);
      fail();
    } catch (PathIsNotDirectoryException e) {
      // expected
    }
  }

  // Case 3: the same holds for a file that is under construction.
  {
    final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm,
        0L, 0L, null, replication, 1024L);
    from.asFile().toUnderConstruction("client", "machine");
    final INodeFile f = INodeFile.valueOf(from, path);
    assertTrue(f == from);
    try {
      INodeDirectory.valueOf(from, path);
      fail();
    } catch (PathIsNotDirectoryException expected) {
      // expected
    }
  }

  // Case 4: a directory is rejected by INodeFile.valueOf and returned
  // as-is by INodeDirectory.valueOf.
  {
    final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L);
    try {
      INodeFile.valueOf(from, path);
      fail();
    } catch (FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("Path is not a file"));
    }
    final INodeDirectory d = INodeDirectory.valueOf(from, path);
    assertTrue(d == from);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * Verifies that the inode map tracks a directory inode through
 * quota-driven replacement: after each setQuota call the inode id must
 * still map to the directory's current (live) inode.
 */
@Test
public void testInodeReplacement() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);
    INodeDirectory dirInode = getDir(fsdir, dir);
    INode mappedInode = fsdir.getInode(dirInode.getId());
    assertSame(dirInode, mappedInode);

    // Setting a quota replaces the directory's inode representation;
    // the map must point at the replacement.
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    dirInode = getDir(fsdir, dir);
    assertTrue(dirInode.isWithQuota());
    mappedInode = fsdir.getInode(dirInode.getId());
    assertSame(dirInode, mappedInode);

    // Clearing the quota replaces it again; the map must follow again.
    hdfs.setQuota(dir, -1, -1);
    dirInode = getDir(fsdir, dir);
    mappedInode = fsdir.getInode(dirInode.getId());
    assertSame(dirInode, mappedInode);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file under /dir and check its full path name.
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());

    // Setting a quota replaces the directory inode; the directory's
    // full path name must still resolve correctly afterwards.
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());

    // After renaming the directory, the child's full path name must
    // follow the rename (i.e. resolve through the replaced parent).
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    fnode = fsdir.getINode(newFile.toString());
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Builds a cluster, creates the test files, verifies the expected block
 * count, and then runs the concurrent delete threads.
 */
@Test
public void largeDelete() throws Throwable {
  mc = new MiniDFSCluster.Builder(CONF).build();
  try {
    mc.waitActive();
    Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
    createFiles();
    // All created blocks must be accounted for before the deletes start.
    Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
    runThreads();
  } finally {
    mc.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestMetaSave

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests metasave: after stopping a datanode and raising the replication
 * of one file, the metasave output must report the filesystem totals,
 * one live and one dead datanode, and then an under-replicated file.
 */
@Test
public void testMetaSave() throws IOException, InterruptedException {
  for (int i = 0; i < 2; i++) {
    Path file = new Path("/filestatus" + i);
    DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2, seed);
  }

  // Stop a datanode and wait for the namenode to notice it is dead.
  cluster.stopDataNode(1);
  Thread.sleep(15000);
  namesystem.setReplication("/filestatus0", (short) 4);

  namesystem.metaSave("metasave.out.txt");

  // Verify the contents of the metasave output file.
  // NOTE(review): InputStreamReader uses the platform default charset
  // here, matching whatever charset metaSave wrote with - confirm.
  FileInputStream fstream = new FileInputStream(getLogFile("metasave.out.txt"));
  DataInputStream in = new DataInputStream(fstream);
  BufferedReader reader = null;
  try {
    reader = new BufferedReader(new InputStreamReader(in));
    String line = reader.readLine();
    Assert.assertEquals(
        "3 files and directories, 2 blocks = 5 total filesystem objects", line);
    line = reader.readLine();
    // Fixed: was assertTrue(line.equals(...)) - assertEquals reports the
    // actual line on failure and cannot NPE on a null line.
    Assert.assertEquals("Live Datanodes: 1", line);
    line = reader.readLine();
    Assert.assertEquals("Dead Datanodes: 1", line);
    line = reader.readLine();
    line = reader.readLine();
    assertTrue(line.matches("^/filestatus[01]:.*"));
  } finally {
    if (reader != null)
      reader.close();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameCache

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDictionary() throws Exception { NameCache cache=new NameCache(2); String[] matching={"part1","part10000000","fileabc","abc","filepart"}; String[] notMatching={"spart1","apart","abcd","def"}; for ( String s : matching) { cache.put(s); assertTrue(s == cache.put(s)); } for ( String s : notMatching) { cache.put(s); } cache.initialized(); for ( String s : matching) { verifyNameReuse(cache,s,true); } assertEquals(matching.length,cache.size()); for ( String s : notMatching) { verifyNameReuse(cache,s,false); } cache.reset(); cache.initialized(); for ( String s : matching) { verifyNameReuse(cache,s,false); } for ( String s : notMatching) { verifyNameReuse(cache,s,false); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestNameEditsConfigs

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * This test tries to simulate failure scenarios.
 * 1. Start cluster with shared name and edits dir
 * 2. Restart cluster by adding separate name and edits dirs
 * 3. Restart cluster by removing shared name and edits dir
 * 4. Restart cluster with old shared name and edits dir, but only latest
 *    name dir. This should fail since we don't have latest edits dir
 * 5. Restart cluster with old shared name and edits dir, but only latest
 *    edits dir. This should succeed since the latest edits will have
 *    segments leading all the way from the image in name_and_edits.
 */
@Test
public void testNameEditsConfigsFailure() throws IOException {
  Path file1 = new Path("TestNameEditsConfigs1");
  Path file2 = new Path("TestNameEditsConfigs2");
  Path file3 = new Path("TestNameEditsConfigs3");
  MiniDFSCluster cluster = null;
  Configuration conf = null;
  FileSystem fileSys = null;
  File nameOnlyDir = new File(base_dir, "name");
  File editsOnlyDir = new File(base_dir, "edits");
  File nameAndEditsDir = new File(base_dir, "name_and_edits");

  // Stage 1: start with name and edits in the same (shared) directory.
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
  replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .manageNameDfsDirs(false).build();
    cluster.waitActive();
    assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
        replication, SEED);
    checkFile(fileSys, file1, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  // Stage 2: restart with an additional separate name dir and a separate
  // edits dir alongside the shared dir; all three get a VERSION file.
  conf = new HdfsConfiguration();
  assertTrue(nameOnlyDir.mkdir());
  assertTrue(editsOnlyDir.mkdir());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameAndEditsDir.getPath() + "," + nameOnlyDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      nameAndEditsDir.getPath() + "," + editsOnlyDir.getPath());
  replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
    assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
    assertTrue(new File(editsOnlyDir, "current/VERSION").exists());
    fileSys = cluster.getFileSystem();
    assertTrue(fileSys.exists(file1));
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
        replication, SEED);
    checkFile(fileSys, file2, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  // Stage 3: restart using only the separate name and edits dirs (shared
  // dir dropped); file1 was cleaned up in stage 2, file2 must survive.
  try {
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    assertTrue(fileSys.exists(file2));
    checkFile(fileSys, file2, replication);
    cleanupFile(fileSys, file2);
    DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
        replication, SEED);
    checkFile(fileSys, file3, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  // Stage 4: old shared dir plus only the latest name dir - must fail to
  // start because the latest edits dir is missing.
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
  replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .format(false).manageNameDfsDirs(false).build();
    fail("Successfully started cluster but should not have been able to.");
  } catch (IOException e) { // expected startup failure
    LOG.info("EXPECTED: cluster start failed due to missing "
        + "latest edits dir", e);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    cluster = null;
  }

  // Stage 5: old shared dir plus the latest edits dir - must succeed,
  // since the edits lead forward from the image in name_and_edits.
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
  replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .format(false).manageNameDfsDirs(false).build();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    assertFalse(fileSys.exists(file2));
    assertTrue(fileSys.exists(file3));
    checkFile(fileSys, file3, replication);
    cleanupFile(fileSys, file3);
    DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
        replication, SEED);
    checkFile(fileSys, file3, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings({"unchecked"}) @Test public void testNameNodeMXBeanInfo() throws Exception { Configuration conf=new Configuration(); conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,NativeIO.POSIX.getCacheManipulator().getMemlockLimit()); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FSNamesystem fsn=cluster.getNameNode().namesystem; MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"); FileSystem localFileSys=FileSystem.getLocal(conf); Path workingDir=localFileSys.getWorkingDirectory(); Path dir=new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean"); Path includeFile=new Path(dir,"include"); assertTrue(localFileSys.mkdirs(dir)); StringBuilder includeHosts=new StringBuilder(); for ( DataNode dn : cluster.getDataNodes()) { includeHosts.append(dn.getDisplayName()).append("\n"); } DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString()); conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath()); fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); cluster.stopDataNode(0); while (fsn.getNumDatanodesInService() != 2) { try { Thread.sleep(1000); } catch ( InterruptedException e) { } } String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId"); assertEquals(fsn.getClusterId(),clusterId); String blockpoolId=(String)mbs.getAttribute(mxbeanName,"BlockPoolId"); assertEquals(fsn.getBlockPoolId(),blockpoolId); String version=(String)mbs.getAttribute(mxbeanName,"Version"); assertEquals(fsn.getVersion(),version); assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision())); Long used=(Long)mbs.getAttribute(mxbeanName,"Used"); assertEquals(fsn.getUsed(),used.longValue()); Long 
total=(Long)mbs.getAttribute(mxbeanName,"Total"); assertEquals(fsn.getTotal(),total.longValue()); String safemode=(String)mbs.getAttribute(mxbeanName,"Safemode"); assertEquals(fsn.getSafemode(),safemode); Long nondfs=(Long)(mbs.getAttribute(mxbeanName,"NonDfsUsedSpace")); assertEquals(fsn.getNonDfsUsedSpace(),nondfs.longValue()); Float percentremaining=(Float)(mbs.getAttribute(mxbeanName,"PercentRemaining")); assertEquals(fsn.getPercentRemaining(),percentremaining.floatValue(),DELTA); Long totalblocks=(Long)(mbs.getAttribute(mxbeanName,"TotalBlocks")); assertEquals(fsn.getTotalBlocks(),totalblocks.longValue()); String alivenodeinfo=(String)(mbs.getAttribute(mxbeanName,"LiveNodes")); Map> liveNodes=(Map>)JSON.parse(alivenodeinfo); assertTrue(liveNodes.size() > 0); for ( Map liveNode : liveNodes.values()) { assertTrue(liveNode.containsKey("nonDfsUsedSpace")); assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0); assertTrue(liveNode.containsKey("capacity")); assertTrue(((Long)liveNode.get("capacity")) > 0); assertTrue(liveNode.containsKey("numBlocks")); assertTrue(((Long)liveNode.get("numBlocks")) == 0); } assertEquals(fsn.getLiveNodes(),alivenodeinfo); String deadnodeinfo=(String)(mbs.getAttribute(mxbeanName,"DeadNodes")); assertEquals(fsn.getDeadNodes(),deadnodeinfo); Map> deadNodes=(Map>)JSON.parse(deadnodeinfo); assertTrue(deadNodes.size() > 0); for ( Map deadNode : deadNodes.values()) { assertTrue(deadNode.containsKey("lastContact")); assertTrue(deadNode.containsKey("decommissioned")); assertTrue(deadNode.containsKey("xferaddr")); } String nodeUsage=(String)(mbs.getAttribute(mxbeanName,"NodeUsage")); assertEquals("Bad value for NodeUsage",fsn.getNodeUsage(),nodeUsage); String nameJournalStatus=(String)(mbs.getAttribute(mxbeanName,"NameJournalStatus")); assertEquals("Bad value for NameJournalStatus",fsn.getNameJournalStatus(),nameJournalStatus); String journalTxnInfo=(String)mbs.getAttribute(mxbeanName,"JournalTransactionInfo"); assertEquals("Bad value for 
NameTxnIds",fsn.getJournalTransactionInfo(),journalTxnInfo); String nnStarted=(String)mbs.getAttribute(mxbeanName,"NNStarted"); assertEquals("Bad value for NNStarted",fsn.getNNStarted(),nnStarted); String compileInfo=(String)mbs.getAttribute(mxbeanName,"CompileInfo"); assertEquals("Bad value for CompileInfo",fsn.getCompileInfo(),compileInfo); String corruptFiles=(String)(mbs.getAttribute(mxbeanName,"CorruptFiles")); assertEquals("Bad value for CorruptFiles",fsn.getCorruptFiles(),corruptFiles); String nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses")); assertEquals(fsn.getNameDirStatuses(),nameDirStatuses); Map> statusMap=(Map>)JSON.parse(nameDirStatuses); Collection nameDirUris=cluster.getNameDirs(0); for ( URI nameDirUri : nameDirUris) { File nameDir=new File(nameDirUri); System.out.println("Checking for the presence of " + nameDir + " in active name dirs."); assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath())); } assertEquals(2,statusMap.get("active").size()); assertEquals(0,statusMap.get("failed").size()); File failedNameDir=new File(nameDirUris.iterator().next()); assertEquals(0,FileUtil.chmod(new File(failedNameDir,"current").getAbsolutePath(),"000")); cluster.getNameNodeRpc().rollEditLog(); nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses")); statusMap=(Map>)JSON.parse(nameDirStatuses); for ( URI nameDirUri : nameDirUris) { File nameDir=new File(nameDirUri); String expectedStatus=nameDir.equals(failedNameDir) ? 
"failed" : "active"; System.out.println("Checking for the presence of " + nameDir + " in "+ expectedStatus+ " name dirs."); assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath())); } assertEquals(1,statusMap.get("active").size()); assertEquals(1,statusMap.get("failed").size()); assertEquals(0L,mbs.getAttribute(mxbeanName,"CacheUsed")); assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(),mbs.getAttribute(mxbeanName,"CacheCapacity")); } finally { if (cluster != null) { for ( URI dir : cluster.getNameDirs(0)) { FileUtil.chmod(new File(new File(dir),"current").getAbsolutePath(),"755"); } cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeOptionParsing

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises {@code NameNode.parseArguments} for the {@code -upgrade} option
 * and its {@code -clusterid} / {@code -renameReserved} sub-options, including
 * malformed inputs that must be rejected.
 *
 * Fix: the expected-exception blocks previously had no {@code fail()} call,
 * so they passed silently when no exception was thrown.
 */
@Test(timeout=10000)
public void testUpgrade() {
  StartupOption opt = null;
  // Plain -upgrade: no cluster id, no reserved-path renames.
  opt = NameNode.parseArguments(new String[]{"-upgrade"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertNull(opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // -upgrade with an explicit cluster id.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-clusterid",
      "mycid"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // Explicit rename targets for both reserved paths.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-clusterid",
      "mycid", "-renameReserved",
      ".snapshot=.my-snapshot,.reserved=.my-reserved"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertEquals(".my-snapshot",
      FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved",
      FSImageFormat.renameReservedMap.get(".reserved"));
  FSImageFormat.renameReservedMap.clear();
  // Same options in a different order must parse identically.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
      ".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid",
      "mycid"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertEquals(".my-snapshot",
      FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved",
      FSImageFormat.renameReservedMap.get(".reserved"));
  // -renameReserved with no argument uses the default generated names.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals(".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION
      + ".UPGRADE_RENAMED",
      FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION
      + ".UPGRADE_RENAMED",
      FSImageFormat.renameReservedMap.get(".reserved"));
  // A path that is not reserved must be rejected.
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".reserved=.my-reserved,.not-reserved=.my-not-reserved"});
    fail("Expected IllegalArgumentException for unknown reserved path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Unknown reserved path", e);
  }
  // Renaming a reserved path to itself is invalid.
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".reserved=.my-reserved,.snapshot=.snapshot"});
    fail("Expected IllegalArgumentException for invalid rename path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path", e);
  }
  // Renaming one reserved path onto another is invalid.
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".snapshot=.reserved"});
    fail("Expected IllegalArgumentException for invalid rename path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path", e);
  }
  // An unrecognized flag makes parsing fail entirely.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-cid"});
  assertNull(opt);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNamenodeRetryCache

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test retry-cache behavior of createSnapshot, renameSnapshot and
 * deleteSnapshot: retries within one RPC call id must return the cached
 * result, while a new call id must reach the real (now failing) operation.
 */
@Test
public void testSnapshotMethods() throws Exception {
  String dir = "/testNamenodeRetryCache/testCreateSnapshot/src";
  resetCall();
  namesystem.mkdirs(dir, perm, true);
  namesystem.allowSnapshot(dir);
  // Retries of the same call id all return the cached snapshot name.
  newCall();
  String name = namesystem.createSnapshot(dir, "snap1");
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  // A fresh call id bypasses the cache, so the duplicate name must fail.
  newCall();
  try {
    namesystem.createSnapshot(dir, "snap1");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: snapshot "snap1" already exists
  }
  // Same pattern for renameSnapshot: retries succeed from the cache...
  newCall();
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  // ...but a new call id fails because "snap1" no longer exists.
  newCall();
  try {
    namesystem.renameSnapshot(dir, "snap1", "snap2");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: source snapshot is gone
  }
  // And for deleteSnapshot: cached retries succeed...
  newCall();
  namesystem.deleteSnapshot(dir, "snap2");
  namesystem.deleteSnapshot(dir, "snap2");
  namesystem.deleteSnapshot(dir, "snap2");
  // ...while a new call id fails because "snap2" was already deleted.
  newCall();
  try {
    namesystem.deleteSnapshot(dir, "snap2");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: snapshot already deleted
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After running a set of operations, restart the NameNode and check that the
 * retry cache has been rebuilt from the edit log: it must contain the same
 * number of entries, and every pre-restart entry must still be present.
 *
 * Fix: generic type arguments were restored (the extracted text used raw
 * types, and {@code CacheEntry entry = iter.next()} does not compile with a
 * raw {@code Iterator}).
 */
@Test
public void testRetryCacheRebuild() throws Exception {
  // Run 23 cache-populating operations against the cluster.
  DFSTestUtil.runOperations(cluster, filesystem, conf, BlockSize, 0);
  LightWeightCache<CacheEntry, CacheEntry> cacheSet =
      (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache()
          .getCacheSet();
  assertEquals(23, cacheSet.size());
  // Snapshot the current entries so they can be compared after restart.
  Map<CacheEntry, CacheEntry> oldEntries =
      new HashMap<CacheEntry, CacheEntry>();
  Iterator<CacheEntry> iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    oldEntries.put(entry, entry);
  }
  // Restart; the retry cache must be rebuilt by replaying the edit log.
  cluster.restartNameNode();
  cluster.waitActive();
  namesystem = cluster.getNamesystem();
  assertTrue(namesystem.hasRetryCache());
  cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
      .getRetryCache().getCacheSet();
  assertEquals(23, cacheSet.size());
  iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    assertTrue(oldEntries.containsKey(entry));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test retry-cache behavior of file creation: retried startFile calls with
 * the same call id return the cached status, while a new call id fails
 * because the file now exists.
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Retries of the same call id return the identical cached status.
  newCall();
  HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1,
      BlockSize, null);
  Assert.assertEquals(status, namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1,
      BlockSize, null));
  Assert.assertEquals(status, namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1,
      BlockSize, null));
  // A new call id bypasses the cache; creating the existing file must fail.
  newCall();
  try {
    namesystem.startFile(src, perm, "holder", "clientmachine",
        EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected: file already exists
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test retry-cache behavior of append (the original Javadoc incorrectly said
 * "Test for rename1"): retried appendFile calls with the same call id return
 * the cached block, while a new call id fails because the file is already
 * open for append.
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short) 1, 0L);
  // Retries of the same call id return the identical cached LocatedBlock.
  newCall();
  LocatedBlock b = namesystem.appendFile(src, "holder", "clientMachine");
  Assert.assertEquals(b, namesystem.appendFile(src, "holder",
      "clientMachine"));
  Assert.assertEquals(b, namesystem.appendFile(src, "holder",
      "clientMachine"));
  // A new call id bypasses the cache; the second real append must fail.
  newCall();
  try {
    namesystem.appendFile(src, "holder", "clientMachine");
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // expected: lease/append conflict
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Check that DFS remains in proper condition after a restart: file contents,
 * root/dir ownership changes and modification times survive, and the fsimage
 * checksum changes when (and only when) the namespace is modified.
 */
@Test
public void testRestartDFS() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FSNamesystem fsn = null;
  int numNamenodeDirs;
  DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS")
      .setNumFiles(200).build();
  final String dir = "/srcdat";
  final Path rootpath = new Path("/");
  final Path dirpath = new Path(dir);
  long rootmtime;
  FileStatus rootstatus;
  FileStatus dirstatus;
  try {
    // First incarnation: format, create files, tweak owner/group.
    cluster = new MiniDFSCluster.Builder(conf).format(true)
        .numDataNodes(NUM_DATANODES).build();
    String[] nameNodeDirs = conf.getStrings(
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[]{});
    numNamenodeDirs = nameNodeDirs.length;
    assertTrue("failed to get number of Namenode StorageDirs",
        numNamenodeDirs != 0);
    FileSystem fs = cluster.getFileSystem();
    files.createFiles(fs, dir);
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // NOTE(review): rootstatus is taken from dirpath, not rootpath — looks
    // suspicious, but later assertions only compare owner/group strings,
    // which happen to match; verify against upstream before changing.
    rootstatus = fs.getFileStatus(dirpath);
    dirstatus = fs.getFileStatus(dirpath);
    fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
    fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  try {
    // Second incarnation: restart without formatting and verify state.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .numDataNodes(NUM_DATANODES).build();
    fsn = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Filesystem corrupted after restart.",
        files.checkFiles(fs, dir));
    final FileStatus newrootstatus = fs.getFileStatus(rootpath);
    assertEquals(rootmtime, newrootstatus.getModificationTime());
    assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
    assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
    final FileStatus newdirstatus = fs.getFileStatus(dirpath);
    assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
    assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // Checksum the fsimage, then modify the namespace and save it again:
    // the two checksums must differ.
    final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
    files.cleanup(fs, dir);
    files.createFiles(fs, dir);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
    assertFalse("Modified namespace should change fsimage contents. "
        + "was: " + checkAfterRestart + " now: " + checkAfterModify,
        checkAfterRestart.equals(checkAfterModify));
    fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    files.cleanup(fs, dir);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Starts a Kerberized MiniDFSCluster and verifies that a non-superuser
 * (user1) cannot write to "/" but can create and list a directory under the
 * world-writable "/tmp", and that its UGI authenticated via Kerberos.
 *
 * Fix: restored the {@code <FileSystem>} type argument on
 * {@code PrivilegedExceptionAction} — with the raw type, {@code ugi.doAs}
 * returns {@code Object} and the assignment does not compile.
 */
@Test
public void testName() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    String keyTabDir = System.getProperty("kdc.resource.dir") + "/keytabs";
    String nn1KeytabPath = keyTabDir + "/nn1.keytab";
    String user1KeyTabPath = keyTabDir + "/user1.keytab";
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        "nn1/localhost@EXAMPLE.COM");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nn1KeytabPath);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    // Make /tmp world-writable (0777) so user1 can create entries there.
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    fsForCurrentUser.setPermission(new Path("/tmp"),
        new FsPermission((short) 511));
    // Log in as the unprivileged user and obtain a FileSystem as that user.
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",
            user1KeyTabPath);
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("user1 must not be allowed to write in /");
    } catch (IOException expected) {
      // expected: permission denied under /
    }
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS,
        ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

TestInitializer AssumptionSetter HybridVerifier 
/** Skip the entire suite unless the test KDC is up and running. */
@Before
public void testKdcRunning() {
  Assume.assumeTrue(TestUGIWithSecurityOn.isKdcRunning());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecureNameNodeWithExternalKdc

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Starts a Kerberized MiniDFSCluster against an external KDC (principals and
 * keytabs supplied via system properties) and verifies that an unprivileged
 * user cannot write to "/" but can create and list a directory under the
 * world-writable "/tmp", authenticating via Kerberos.
 *
 * Fix: restored the {@code <FileSystem>} type argument on
 * {@code PrivilegedExceptionAction} — with the raw type, {@code ugi.doAs}
 * returns {@code Object} and the assignment does not compile.
 */
@Test
public void testSecureNameNode() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    // All principals/keytabs come from system properties set by the harness.
    String nnPrincipal = System.getProperty("dfs.namenode.kerberos.principal");
    String nnSpnegoPrincipal = System.getProperty(
        "dfs.namenode.kerberos.internal.spnego.principal");
    String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
    assertNotNull("NameNode principal was not specified", nnPrincipal);
    assertNotNull("NameNode SPNEGO principal was not specified",
        nnSpnegoPrincipal);
    assertNotNull("NameNode keytab was not specified", nnKeyTab);
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
    conf.set(
        DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
        nnSpnegoPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    // Make /tmp world-writable (0777) so the test user can write there.
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    fsForCurrentUser.setPermission(new Path("/tmp"),
        new FsPermission((short) 511));
    String userPrincipal = System.getProperty("user.principal");
    String userKeyTab = System.getProperty("user.keytab");
    assertNotNull("User principal was not specified", userPrincipal);
    assertNotNull("User keytab was not specified", userKeyTab);
    // Log in as the unprivileged user and obtain a FileSystem as that user.
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("User must not be allowed to write in /");
    } catch (IOException expected) {
      // expected: permission denied under /
    }
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS,
        ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

TestInitializer AssumptionSetter HybridVerifier 
/** Skip the entire suite unless an external KDC is up and running. */
@Before
public void testExternalKdcRunning() {
  Assume.assumeTrue(isExternalKdcRunning());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSnapshotPathINodes

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file while modifying file after snapshot: the snapshot path
 * must keep the pre-modification mtime while the live path reflects the
 * append.
 */
@Test(timeout=15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
  // Resolve the live path and remember its modification time.
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length);
  assertEquals(inodes[components.length - 1].getFullPathName(),
      file1.toString());
  final long modTime = inodes[inodes.length - 1].getModificationTime();
  // Take snapshot s3, then modify the file.
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s3");
  DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
  // Resolve the snapshot path; ".snapshot" itself contributes no inode,
  // hence components.length - 1.
  String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
  names = INode.getPathNames(snapshotPath);
  components = INode.getPathComponents(names);
  INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components);
  INode[] ssInodes = ssNodesInPath.getINodes();
  assertEquals(ssInodes.length, components.length - 1);
  final Snapshot s3 = getSnapshot(ssNodesInPath, "s3");
  assertSnapshot(ssNodesInPath, true, s3, 3);
  // The snapshot copy keeps the mtime captured before the append.
  INode snapshotFileNode = ssInodes[ssInodes.length - 1];
  assertEquals(snapshotFileNode.getLocalName(), file1.getName());
  assertTrue(snapshotFileNode.asFile().isWithSnapshot());
  assertEquals(modTime, snapshotFileNode.getModificationTime(
      ssNodesInPath.getPathSnapshotId()));
  // The live path, by contrast, has a newer modification time.
  names = INode.getPathNames(file1.toString());
  components = INode.getPathComponents(names);
  INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components);
  assertSnapshot(newNodesInPath, false, s3, -1);
  INode[] newInodes = newNodesInPath.getINodes();
  assertEquals(newInodes.length, components.length);
  final int last = components.length - 1;
  assertEquals(newInodes[last].getFullPathName(), file1.toString());
  Assert.assertFalse(modTime == newInodes[last].getModificationTime());
  hdfs.deleteSnapshot(sub1, "s3");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a normal (non-snapshot) file, including partial resolution with a
 * limited number of path components.
 */
@Test(timeout=15000)
public void testNonSnapshotPathINodes() throws Exception {
  // Full resolution: one inode per path component, no snapshot involved.
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length);
  assertSnapshot(nodesInPath, false, null, -1);
  assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
      inodes[components.length - 1] != null);
  assertEquals(inodes[components.length - 1].getFullPathName(),
      file1.toString());
  assertEquals(inodes[components.length - 2].getFullPathName(),
      sub1.toString());
  assertEquals(inodes[components.length - 3].getFullPathName(),
      dir.toString());
  // Partial resolution keeping only the last inode.
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
  inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, 1);
  assertSnapshot(nodesInPath, false, null, -1);
  assertEquals(inodes[0].getFullPathName(), file1.toString());
  // Partial resolution keeping the last two inodes (parent + file).
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
  inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, 2);
  assertSnapshot(nodesInPath, false, null, -1);
  assertEquals(inodes[1].getFullPathName(), file1.toString());
  assertEquals(inodes[0].getFullPathName(), sub1.toString());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot path referring to a file added after the snapshot was
 * taken: the snapshot path must resolve to a null last inode while the live
 * path resolves normally.
 */
@Test(timeout=15000)
public void testSnapshotPathINodesWithAddedFile() throws Exception {
  // Snapshot first, then create file3 — so s4 does not contain it.
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s4");
  final Path file3 = new Path(sub1, "file3");
  DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
  {
    // Snapshot path: last inode is null because file3 post-dates s4.
    String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
    String[] names = INode.getPathNames(snapshotPath);
    byte[][] components = INode.getPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
        components);
    INode[] inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, components.length - 1);
    assertEquals(nodesInPath.getNumNonNull(), components.length - 2);
    s4 = getSnapshot(nodesInPath, "s4");
    assertSnapshot(nodesInPath, true, s4, 3);
    assertNull(inodes[inodes.length - 1]);
  }
  // Live path: resolves completely, unaffected by the snapshot.
  String[] names = INode.getPathNames(file3.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length);
  assertSnapshot(nodesInPath, false, s4, -1);
  assertEquals(inodes[components.length - 1].getFullPathName(),
      file3.toString());
  assertEquals(inodes[components.length - 2].getFullPathName(),
      sub1.toString());
  assertEquals(inodes[components.length - 3].getFullPathName(),
      dir.toString());
  hdfs.deleteSnapshot(sub1, "s4");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file: full and partial resolution of a ".snapshot" path,
 * resolution of the ".snapshot" directory itself, and rejection of
 * ".snapshot" on a non-snapshottable path.
 */
@Test(timeout=15000)
public void testSnapshotPathINodes() throws Exception {
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s1");
  // Full resolution of a snapshot file path; ".snapshot" itself contributes
  // no inode, hence components.length - 1.
  String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
  String[] names = INode.getPathNames(snapshotPath);
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length - 1);
  final Snapshot snapshot = getSnapshot(nodesInPath, "s1");
  assertSnapshot(nodesInPath, true, snapshot, 3);
  INode snapshotFileNode = inodes[inodes.length - 1];
  assertINodeFile(snapshotFileNode, file1);
  assertTrue(snapshotFileNode.getParent().isWithSnapshot());
  // Partial resolution keeping only the last inode.
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
  inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, 1);
  assertSnapshot(nodesInPath, true, snapshot, -1);
  assertINodeFile(nodesInPath.getLastINode(), file1);
  // Partial resolution keeping the last two inodes.
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
  inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, 2);
  assertSnapshot(nodesInPath, true, snapshot, 0);
  assertINodeFile(nodesInPath.getLastINode(), file1);
  // Resolving the ".snapshot" directory itself yields the snapshot root.
  String dotSnapshotPath = sub1.toString() + "/.snapshot";
  names = INode.getPathNames(dotSnapshotPath);
  components = INode.getPathComponents(names);
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length - 1);
  assertSnapshot(nodesInPath, true, snapshot, -1);
  final INode last = nodesInPath.getLastINode();
  assertEquals(last.getFullPathName(), sub1.toString());
  assertFalse(last instanceof INodeFile);
  // ".snapshot" under a path that is not snapshottable must not resolve.
  String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
  Path invalidPath = new Path(invalidPathComponent[0]);
  for (int i = 1; i < invalidPathComponent.length; i++) {
    invalidPath = new Path(invalidPath, invalidPathComponent[i]);
    try {
      hdfs.getFileStatus(invalidPath);
      Assert.fail();
    } catch (FileNotFoundException fnfe) {
      System.out.println("The exception is expected: " + fnfe);
    }
  }
  hdfs.deleteSnapshot(sub1, "s1");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file after deleting the original file: the snapshot path
 * still resolves to the preserved copy, while the live path resolves to a
 * null last inode.
 */
@Test(timeout=15000)
public void testSnapshotPathINodesAfterDeletion() throws Exception {
  // Snapshot first, then delete the file — s2 keeps a copy.
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s2");
  hdfs.delete(file1, false);
  final Snapshot snapshot;
  {
    // The snapshot path still resolves to the preserved file inode.
    String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
    String[] names = INode.getPathNames(snapshotPath);
    byte[][] components = INode.getPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
        components);
    INode[] inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, components.length - 1);
    snapshot = getSnapshot(nodesInPath, "s2");
    assertSnapshot(nodesInPath, true, snapshot, 3);
    final INode inode = inodes[inodes.length - 1];
    assertEquals(file1.getName(), inode.getLocalName());
    assertTrue(inode.asFile().isWithSnapshot());
  }
  // The live path no longer has an inode for the deleted file.
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  assertEquals(inodes.length, components.length);
  assertEquals(nodesInPath.getNumNonNull(), components.length - 1);
  assertSnapshot(nodesInPath, false, snapshot, -1);
  assertNull(inodes[components.length - 1]);
  assertEquals(inodes[components.length - 2].getFullPathName(),
      sub1.toString());
  assertEquals(inodes[components.length - 3].getFullPathName(),
      dir.toString());
  hdfs.deleteSnapshot(sub1, "s2");
  hdfs.disallowSnapshot(sub1);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStartup

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * secnn-7: secondary namenode copies fsimage and edits into correct separate
 * directories after a checkpoint.
 * @throws IOException if cluster startup or checkpointing fails
 */
@Test
public void testSNNStartup() throws IOException {
  LOG.info("--starting SecondNN startup test");
  // Name and edits share one dir; checkpoint image/edits get separate dirs.
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());
  LOG.info("--starting NN ");
  MiniDFSCluster cluster = null;
  SecondaryNameNode sn = null;
  NameNode nn = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false)
        .manageNameDfsDirs(false).build();
    cluster.waitActive();
    nn = cluster.getNameNode();
    assertNotNull(nn);
    // Start the secondary and force a checkpoint.
    LOG.info("--starting SecondNN");
    sn = new SecondaryNameNode(config);
    assertNotNull(sn);
    LOG.info("--doing checkpoint");
    sn.doCheckpoint();
    LOG.info("--done checkpoint");
    // The primary's storage dir holds both image and edits.
    FSImage image = nn.getFSImage();
    StorageDirectory sd = image.getStorage().getStorageDir(0);
    assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
    image.getStorage();
    File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
    image.getStorage();
    File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
    LOG.info("--image file " + imf.getAbsolutePath() + "; len = "
        + imf.length());
    LOG.info("--edits file " + edf.getAbsolutePath() + "; len = "
        + edf.length());
    // The secondary must have copied image and edits into its own
    // separate checkpoint directories.
    FSImage chkpImage = sn.getFSImage();
    verifyDifferentDirs(chkpImage, imf.length(), edf.length());
  } catch (IOException e) {
    fail(StringUtils.stringifyException(e));
    System.err.println("checkpoint failed");
    throw e;
  } finally {
    if (sn != null) sn.shutdown();
    if (cluster != null) cluster.shutdown();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Verifies xattr configuration validation at NN startup: a negative max xattr
// size and a negative max-xattrs-per-inode must both abort cluster startup with
// an IllegalArgumentException, while a size of 0 means "unlimited" and is
// reported once per NN start via the log appender (2 lines: format + start).
// Each branch restores the default config value in its finally block so the
// branches stay independent.
@Test(timeout=120000) public void testXattrConfiguration() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; try { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,-1); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); fail("Expected exception with negative xattr size"); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("Cannot set a negative value for the maximum size of an xattr",e); } finally { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); if (cluster != null) { cluster.shutdown(); } } try { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,-1); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); fail("Expected exception with negative # xattrs per inode"); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("Cannot set a negative limit on the number of xattrs per inode",e); } finally { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); if (cluster != null) { cluster.shutdown(); } } try { final LogVerificationAppender appender=new LogVerificationAppender(); final Logger logger=Logger.getRootLogger(); logger.addAppender(appender); int count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)"); assertEquals("Expected no messages about unlimited xattr size",0,count); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,0); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)"); assertEquals("Expected unlimited xattr size",2,count); } finally { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * This test tests hosts include list contains host names. After namenode
 * restarts, the still alive datanodes should not have any trouble in getting
 * registrant again.
 */
@Test
public void testNNRestart() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  FileSystem localFileSys;
  Path hostsFile;
  Path excludeFile;
  // Seconds between datanode heartbeats; paces the live-node polling loop.
  int HEARTBEAT_INTERVAL = 1;
  localFileSys = FileSystem.getLocal(config);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(localFileSys, excludeFile, null);
  config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  // Put the local host's NAME (resolved from 127.0.0.1), not its address,
  // in the include file -- that is the scenario under test.
  ArrayList<String> list = new ArrayList<String>();
  byte b[] = {127, 0, 0, 1};
  InetAddress inetAddress = InetAddress.getByAddress(b);
  list.add(inetAddress.getHostName());
  writeConfigFile(localFileSys, hostsFile, list);
  int numDatanodes = 1;
  try {
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(numDatanodes)
        .setupHostsFile(true)
        .build();
    cluster.waitActive();
    cluster.restartNameNode();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    assertNotNull(nn);
    assertTrue(cluster.isDataNodeUp());
    // Give the surviving datanode up to ~5 heartbeats to re-register with
    // the restarted namenode before asserting on the live-node count.
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be " + numDatanodes,
        numDatanodes, info.length);
  } finally {
    // BUGFIX: the original catch block called fail(stringifyException(e))
    // and then "throw e"; fail() throws an AssertionError, so the rethrow
    // was unreachable and the real IOException was masked. The catch added
    // nothing, so it is removed -- the IOException now propagates intact.
    cleanupFile(localFileSys, excludeFile.getParent());
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStartupProgressServlet

NullVerifier EqualityVerifier HybridVerifier 
// Drives the startup-progress servlet while the LoadingEdits phase is half
// done (overall 0.375 complete) and compares the filtered JSON response
// against a literal expected structure built with ImmutableMap/Arrays.asList.
// NOTE(review): the expected tree must match the servlet's field ordering
// exactly, since comparison is on the serialized JSON string.
@Test public void testRunningState() throws Exception { setStartupProgressForRunningState(startupProgress); String respBody=doGetAndReturnResponseBody(); assertNotNull(respBody); Map expected=ImmutableMap.builder().put("percentComplete",0.375f).put("phases",Arrays.asList(ImmutableMap.builder().put("name","LoadingFsImage").put("desc","Loading fsimage").put("status","COMPLETE").put("percentComplete",1.0f).put("steps",Collections.singletonList(ImmutableMap.builder().put("name","Inodes").put("desc","inodes").put("count",100L).put("total",100L).put("percentComplete",1.0f).build())).build(),ImmutableMap.builder().put("name","LoadingEdits").put("desc","Loading edits").put("status","RUNNING").put("percentComplete",0.5f).put("steps",Collections.singletonList(ImmutableMap.builder().put("count",100L).put("file","file").put("size",1000L).put("total",200L).put("percentComplete",0.5f).build())).build(),ImmutableMap.builder().put("name","SavingCheckpoint").put("desc","Saving checkpoint").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build(),ImmutableMap.builder().put("name","SafeMode").put("desc","Safe mode").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build())).build(); assertEquals(JSON.toString(expected),filterJson(respBody)); }

NullVerifier EqualityVerifier HybridVerifier 
// With no startup progress recorded, every phase must report PENDING with
// 0% complete and empty steps; overall percentComplete is 0. The response is
// compared as a serialized JSON string against a literal expected tree.
@Test public void testInitialState() throws Exception { String respBody=doGetAndReturnResponseBody(); assertNotNull(respBody); Map expected=ImmutableMap.builder().put("percentComplete",0.0f).put("phases",Arrays.asList(ImmutableMap.builder().put("name","LoadingFsImage").put("desc","Loading fsimage").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build(),ImmutableMap.builder().put("name","LoadingEdits").put("desc","Loading edits").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build(),ImmutableMap.builder().put("name","SavingCheckpoint").put("desc","Saving checkpoint").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build(),ImmutableMap.builder().put("name","SafeMode").put("desc","Safe mode").put("status","PENDING").put("percentComplete",0.0f).put("steps",Collections.emptyList()).build())).build(); assertEquals(JSON.toString(expected),filterJson(respBody)); }

NullVerifier EqualityVerifier HybridVerifier 
// After setStartupProgressForFinalState, every phase must report COMPLETE at
// 100% with its finished step counts (inodes 100/100, edits 200/200,
// checkpoint inodes 300/300, safe-mode blocks 400/400); overall is 1.0.
// Compared as a serialized JSON string against a literal expected tree.
@Test public void testFinalState() throws Exception { setStartupProgressForFinalState(startupProgress); String respBody=doGetAndReturnResponseBody(); assertNotNull(respBody); Map expected=ImmutableMap.builder().put("percentComplete",1.0f).put("phases",Arrays.asList(ImmutableMap.builder().put("name","LoadingFsImage").put("desc","Loading fsimage").put("status","COMPLETE").put("percentComplete",1.0f).put("steps",Collections.singletonList(ImmutableMap.builder().put("name","Inodes").put("desc","inodes").put("count",100L).put("total",100L).put("percentComplete",1.0f).build())).build(),ImmutableMap.builder().put("name","LoadingEdits").put("desc","Loading edits").put("status","COMPLETE").put("percentComplete",1.0f).put("steps",Collections.singletonList(ImmutableMap.builder().put("count",200L).put("file","file").put("size",1000L).put("total",200L).put("percentComplete",1.0f).build())).build(),ImmutableMap.builder().put("name","SavingCheckpoint").put("desc","Saving checkpoint").put("status","COMPLETE").put("percentComplete",1.0f).put("steps",Collections.singletonList(ImmutableMap.builder().put("name","Inodes").put("desc","inodes").put("count",300L).put("total",300L).put("percentComplete",1.0f).build())).build(),ImmutableMap.builder().put("name","SafeMode").put("desc","Safe mode").put("status","COMPLETE").put("percentComplete",1.0f).put("steps",Collections.singletonList(ImmutableMap.builder().put("name","AwaitingReportedBlocks").put("desc","awaiting reported blocks").put("count",400L).put("total",400L).put("percentComplete",1.0f).build())).build())).build(); assertEquals(JSON.toString(expected),filterJson(respBody)); }

Class: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test dfsadmin -restoreFailedStorage command
 * @throws Exception
 */
@Test
public void testDfsAdminCmd() throws Exception {
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(2)
      .manageNameDfsDirs(false)
      .build();
  cluster.waitActive();
  try {
    FSImage fsi = cluster.getNameNode().getFSImage();
    // restoreFailedStorage is expected to default to true.
    boolean restore = fsi.getStorage().getRestoreFailedStorage();
    LOG.info("Restore is " + restore);
    // BUGFIX: was assertEquals(restore, true) with (expected, actual)
    // swapped; a boolean check reads better as assertTrue.
    assertTrue("restoreFailedStorage should default to true", restore);
    // Turn the flag off via dfsadmin and verify the NN state changed.
    String cmd = "-fs NAMENODE -restoreFailedStorage false";
    String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
    CommandExecutor executor =
        new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertFalse("After set true call restore is " + restore, restore);
    // Turn it back on and verify.
    cmd = "-fs NAMENODE -restoreFailedStorage true";
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After set false call restore is " + restore, restore);
    // "check" must report the current value without changing it.
    cmd = "-fs NAMENODE -restoreFailedStorage check";
    CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After check call restore is " + restore, restore);
    // BUGFIX: String.trim() returns a new string; the original discarded
    // its result ("commandOutput.trim();" was a no-op). Use the trimmed
    // output for the assertion.
    String commandOutput = cmdResult.getCommandOutput().trim();
    assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestTransferFsImage

UtilityVerifier BooleanVerifier HybridVerifier 
// Regression test for HDFS-1997 (see javadoc below). Points the image
// download at a local path that cannot exist, then verifies both that the
// failure is reported to the mocked NNStorage (reportErrorOnFile) and that
// the surfaced IOException carries the "Unable to download" message.
/** * Regression test for HDFS-1997. Test that, if an exception * occurs on the client side, it is properly reported as such, * and reported to the associated NNStorage object. */ @Test public void testClientSideException() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); NNStorage mockStorage=Mockito.mock(NNStorage.class); List localPath=Collections.singletonList(new File("/xxxxx-does-not-exist/blah")); try { URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL(); String id="getimage=1&txid=0"; TransferFsImage.getFileClient(fsName,id,localPath,mockStorage,false); fail("Didn't get an exception!"); } catch ( IOException ioe) { Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0)); assertTrue("Unexpected exception: " + StringUtils.stringifyException(ioe),ioe.getMessage().contains("Unable to download to any storage")); } finally { cluster.shutdown(); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Starts a local HttpServer2 with a servlet that (presumably) never responds
// -- TODO confirm TestImageTransferServlet stalls -- sets the static
// TransferFsImage.timeout to 2s, and asserts that uploading a small mock
// image file fails with SocketTimeoutException("Read timed out") well inside
// the 10s test timeout.
/** * Test to verify the timeout of Image upload */ @Test(timeout=10000) public void testImageUploadTimeout() throws Exception { Configuration conf=new HdfsConfiguration(); NNStorage mockStorage=Mockito.mock(NNStorage.class); HttpServer2 testServer=HttpServerFunctionalTest.createServer("hdfs"); try { testServer.addServlet("ImageTransfer",ImageServlet.PATH_SPEC,TestImageTransferServlet.class); testServer.start(); URL serverURL=HttpServerFunctionalTest.getServerURL(testServer); TransferFsImage.timeout=2000; File tmpDir=new File(new FileSystemTestHelper().getTestRootDir()); tmpDir.mkdirs(); File mockImageFile=File.createTempFile("image","",tmpDir); FileOutputStream imageFile=new FileOutputStream(mockImageFile); imageFile.write("data".getBytes()); imageFile.close(); Mockito.when(mockStorage.findImageFile(Mockito.any(NameNodeFile.class),Mockito.anyLong())).thenReturn(mockImageFile); Mockito.when(mockStorage.toColonSeparatedString()).thenReturn("storage:info:string"); try { TransferFsImage.uploadImageFromStorage(serverURL,conf,mockStorage,NameNodeFile.IMAGE,1L); fail("TransferImage Should fail with timeout"); } catch ( SocketTimeoutException e) { assertEquals("Upload should timeout","Read timed out",e.getMessage()); } } finally { testServer.stop(); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test to verify the read timeout: with TransferFsImage.timeout set to 2s,
 * a getFileClient call against the test image servlet must fail with
 * SocketTimeoutException("Read timed out") within the 5s test budget.
 */
@Test(timeout=5000)
public void testGetImageTimeout() throws Exception {
  HttpServer2 imageServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    imageServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
        TestImageTransferServlet.class);
    imageServer.start();
    URL imageServerUrl = HttpServerFunctionalTest.getServerURL(imageServer);
    // Shorten the static read timeout so the stalled servlet trips it fast.
    TransferFsImage.timeout = 2000;
    try {
      TransferFsImage.getFileClient(imageServerUrl, "txid=1", null, null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Read should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    if (imageServer != null) {
      imageServer.stop();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestBootstrapStandby

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Rolls the edit log so segment [1,2] is finalized, deletes that segment from
// the shared edits dir, then verifies that bootstrapStandby -force fails with
// ERR_CODE_LOGS_UNAVAILABLE and logs a FATAL about the unreadable txid range.
// Log output is captured via GenericTestUtils.LogCapturer and matched after
// capture is stopped.
/** * Test for the case where the shared edits dir doesn't have * all of the recent edit logs. */ @Test public void testSharedEditsMissingLogs() throws Exception { removeStandbyNameDirs(); CheckpointSignature sig=nn0.getRpcServer().rollEditLog(); assertEquals(3,sig.getCurSegmentTxId()); URI editsUri=cluster.getSharedEditsDir(0,1); File editsDir=new File(editsUri); File editsSegment=new File(new File(editsDir,"current"),NNStorage.getFinalizedEditsFileName(1,2)); GenericTestUtils.assertExists(editsSegment); assertTrue(editsSegment.delete()); LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class)); try { int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE,rc); } finally { logs.stopCapturing(); } GenericTestUtils.assertMatches(logs.getOutput(),"FATAL.*Unable to read transaction ids 1-3 from the configured shared"); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Base success case: after wiping the standby's name dirs, a plain restart of
// NN1 must fail ("storage directory does not exist"), bootstrapStandby
// -nonInteractive must succeed (rc 0) by copying fsimage_0 over, and the file
// sets of the two NNs must then match so NN1 restarts cleanly.
/** * Test for the base success case. The primary NN * hasn't made any checkpoints, and we copy the fsimage_0 * file over and start up. */ @Test public void testSuccessfulBaseCase() throws Exception { removeStandbyNameDirs(); try { cluster.restartNameNode(1); fail("Did not throw"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible",ioe); } int rc=BootstrapStandby.run(new String[]{"-nonInteractive"},cluster.getConfiguration(1)); assertEquals(0,rc); FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0)); FSImageTestUtil.assertNNFilesMatch(cluster); cluster.restartNameNode(1); }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDFSUpgradeWithHA

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Upgrades NN0 with -upgrade (creating "previous" dirs for NN0 and the shared
// edits dir), returns it to REGULAR mode, then attempts to start NN1 with
// -upgrade as well -- which must fail because the shared log is already being
// upgraded. Statement order (shutdown NN1 -> upgrade NN0 -> restart regular)
// is the scenario itself; do not reorder.
/** * Make sure that starting a second NN with the -upgrade flag fails if the * other NN has already done that. */ @Test public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException { MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build(); File sharedDir=new File(cluster.getSharedEditsDir(0,1)); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); checkPreviousDirExistence(sharedDir,false); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkPreviousDirExistence(sharedDir,true); assertTrue(fs.mkdirs(new Path("/foo2"))); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster.restartNameNode(0,false); cluster.transitionToActive(0); assertTrue(fs.mkdirs(new Path("/foo3"))); cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE); try { cluster.restartNameNode(1,false); fail("Should not have been able to start second NN with -upgrade"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded",ioe); } } finally { if (fs != null) { fs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Performs a full NFS-based HA upgrade (upgrade NN0, bootstrap NN1, fail over
// to NN1), then transitions the active NN1 back to standby so NO NameNode is
// active, and asserts that -finalizeUpgrade is rejected with "Cannot finalize
// with no NameNode active".
/** * Ensure that an admin cannot finalize an HA upgrade without at least one NN * being active. */ @Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException { MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build(); File sharedDir=new File(cluster.getSharedEditsDir(0,1)); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); checkPreviousDirExistence(sharedDir,false); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkPreviousDirExistence(sharedDir,true); assertTrue(fs.mkdirs(new Path("/foo2"))); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster.restartNameNode(0,false); cluster.transitionToActive(0); assertTrue(fs.mkdirs(new Path("/foo3"))); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); cluster.transitionToStandby(0); cluster.transitionToActive(1); assertTrue(fs.mkdirs(new Path("/foo4"))); assertCTimesEqual(cluster); cluster.transitionToStandby(1); try { runFinalizeCommand(cluster); fail("Should not have been able to finalize upgrade with no NN active"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe); } } finally { if (fs != null) { fs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Happy-path NFS-based HA upgrade: upgrade NN0 (creating "previous" dirs for
// its name dir and the shared dir), return to REGULAR, bootstrap NN1 with
// -force, fail over to NN1, and confirm writes succeed throughout and cTimes
// stay consistent across the cluster.
/** * Make sure that an HA NN with NFS-based HA can successfully start and * upgrade. */ @Test public void testNfsUpgrade() throws IOException, URISyntaxException { MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build(); File sharedDir=new File(cluster.getSharedEditsDir(0,1)); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); checkPreviousDirExistence(sharedDir,false); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkPreviousDirExistence(sharedDir,true); assertTrue(fs.mkdirs(new Path("/foo2"))); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster.restartNameNode(0,false); cluster.transitionToActive(0); assertTrue(fs.mkdirs(new Path("/foo3"))); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); cluster.transitionToStandby(0); cluster.transitionToActive(1); assertTrue(fs.mkdirs(new Path("/foo4"))); assertCTimesEqual(cluster); } finally { if (fs != null) { fs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// QJM variant of upgrade finalization: upgrades NN0 (which must also create
// "previous" dirs on the JournalNodes), bootstraps NN1, then runs the
// finalize command and verifies every "previous" dir -- NN-local and JN-side
// -- is removed and cTimes remain equal.
@Test public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException { MiniQJMHACluster qjCluster=null; FileSystem fs=null; try { Builder builder=new MiniQJMHACluster.Builder(conf); builder.getDfsBuilder().numDataNodes(0); qjCluster=builder.build(); MiniDFSCluster cluster=qjCluster.getDfsCluster(); checkJnPreviousDirExistence(qjCluster,false); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); assertTrue(fs.mkdirs(new Path("/foo2"))); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkJnPreviousDirExistence(qjCluster,true); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); runFinalizeCommand(cluster); checkClusterPreviousDirExistence(cluster,false); checkJnPreviousDirExistence(qjCluster,false); assertCTimesEqual(cluster); } finally { if (fs != null) { fs.close(); } if (qjCluster != null) { qjCluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Happy-path QJM upgrade: upgrade NN0 (JN "previous" dirs must appear),
// return to REGULAR, bootstrap NN1 with -force, fail over to NN1, verifying
// writes succeed at every stage and cTimes stay equal across the cluster.
/** * Make sure that an HA NN can successfully upgrade when configured using * JournalNodes. */ @Test public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException { MiniQJMHACluster qjCluster=null; FileSystem fs=null; try { Builder builder=new MiniQJMHACluster.Builder(conf); builder.getDfsBuilder().numDataNodes(0); qjCluster=builder.build(); MiniDFSCluster cluster=qjCluster.getDfsCluster(); checkJnPreviousDirExistence(qjCluster,false); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkJnPreviousDirExistence(qjCluster,true); assertTrue(fs.mkdirs(new Path("/foo2"))); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster.restartNameNode(0,false); cluster.transitionToActive(0); assertTrue(fs.mkdirs(new Path("/foo3"))); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); cluster.transitionToStandby(0); cluster.transitionToActive(1); assertTrue(fs.mkdirs(new Path("/foo4"))); assertCTimesEqual(cluster); } finally { if (fs != null) { fs.close(); } if (qjCluster != null) { qjCluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Upgrades NN0 over an NFS shared dir, bootstraps NN1, then shuts the cluster
// down and runs NameNode.doRollback against NN0's name dirs, verifying the
// "previous" dirs (local and shared) are gone afterwards.
// NOTE(review): the post-rollback checkNnPreviousDirExistence call uses the
// already-shut-down cluster handle -- presumably it only inspects on-disk
// paths; confirm against the helper's implementation.
/** * Test rollback with NFS shared dir. */ @Test public void testRollbackWithNfs() throws Exception { MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build(); File sharedDir=new File(cluster.getSharedEditsDir(0,1)); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); checkPreviousDirExistence(sharedDir,false); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkPreviousDirExistence(sharedDir,true); assertTrue(fs.mkdirs(new Path("/foo2"))); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkPreviousDirExistence(sharedDir,true); assertCTimesEqual(cluster); Collection nn1NameDirs=cluster.getNameDirs(0); cluster.shutdown(); conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs)); NameNode.doRollback(conf,false); checkNnPreviousDirExistence(cluster,0,false); checkPreviousDirExistence(sharedDir,false); } finally { if (fs != null) { fs.close(); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// QJM variant of rollback: upgrade NN0 (JN "previous" dirs appear), bootstrap
// NN1, shut down the DFS cluster, then NameNode.doRollback must clear the
// "previous" dirs both NN-local and on the still-running JournalNodes.
@Test public void testRollbackWithJournalNodes() throws IOException, URISyntaxException { MiniQJMHACluster qjCluster=null; FileSystem fs=null; try { Builder builder=new MiniQJMHACluster.Builder(conf); builder.getDfsBuilder().numDataNodes(0); qjCluster=builder.build(); MiniDFSCluster cluster=qjCluster.getDfsCluster(); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); checkJnPreviousDirExistence(qjCluster,false); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkJnPreviousDirExistence(qjCluster,true); assertTrue(fs.mkdirs(new Path("/foo2"))); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkJnPreviousDirExistence(qjCluster,true); assertCTimesEqual(cluster); Collection nn1NameDirs=cluster.getNameDirs(0); cluster.shutdown(); conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs)); NameNode.doRollback(conf,false); checkNnPreviousDirExistence(cluster,0,false); checkJnPreviousDirExistence(qjCluster,false); } finally { if (fs != null) { fs.close(); } if (qjCluster != null) { qjCluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NN0 initiates the QJM upgrade, NN1 is bootstrapped and made active while
// NN0 goes standby; finalization run in that configuration must still succeed
// and remove all "previous" dirs (NN-local and JN-side), with cTimes equal.
/** * Make sure that even if the NN which initiated the upgrade is in the standby * state that we're allowed to finalize. */ @Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException { MiniQJMHACluster qjCluster=null; FileSystem fs=null; try { Builder builder=new MiniQJMHACluster.Builder(conf); builder.getDfsBuilder().numDataNodes(0); qjCluster=builder.build(); MiniDFSCluster cluster=qjCluster.getDfsCluster(); checkJnPreviousDirExistence(qjCluster,false); checkClusterPreviousDirExistence(cluster,false); assertCTimesEqual(cluster); cluster.transitionToActive(0); fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/foo1"))); cluster.shutdownNameNode(1); cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); cluster.restartNameNode(0,false); checkNnPreviousDirExistence(cluster,0,true); checkNnPreviousDirExistence(cluster,1,false); checkJnPreviousDirExistence(qjCluster,true); int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1)); assertEquals(0,rc); cluster.restartNameNode(1); cluster.transitionToStandby(0); cluster.transitionToActive(1); runFinalizeCommand(cluster); checkClusterPreviousDirExistence(cluster,false); checkJnPreviousDirExistence(qjCluster,false); assertCTimesEqual(cluster); } finally { if (fs != null) { fs.close(); } if (qjCluster != null) { qjCluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDFSZKFailoverController

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
// Per-test fixture: builds a 2-NN HA mini-cluster ("ns1" / nn1,nn2 on fixed
// IPC ports 10021/10022, ZKFC ports 10023/10024) with auto-failover enabled
// and an always-succeeding fencer. Formats ZK via thr1's ZKFC, starts both
// ZKFC threads, waits for nn1 ACTIVE and both health monitors HEALTHY, then
// wires up a failover-aware FileSystem for the tests.
@Before public void setup() throws Exception { conf=new Configuration(); conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1",hostPort); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,AlwaysSucceedFencer.class.getName()); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0); conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1",10023); conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",10024); MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021)).addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022))); cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build(); cluster.waitActive(); ctx=new TestContext(); ctx.addThread(thr1=new ZKFCThread(ctx,0)); assertEquals(0,thr1.zkfc.run(new String[]{"-formatZK"})); thr1.start(); waitForHAState(0,HAServiceState.ACTIVE); ctx.addThread(thr2=new ZKFCThread(ctx,1)); thr2.start(); ZKFCTestUtil.waitForHealthState(thr1.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx); ZKFCTestUtil.waitForHealthState(thr2.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx); fs=HATestUtil.configureFailoverFs(cluster,conf); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Kills the active NN0 and verifies the ZKFC fails over to NN1 (data written
// before and after the failover stays visible, and the fencer records NN0 as
// the fenced service); then restarts NN0 as standby, kills NN1, and verifies
// failback to NN0 the same way.
/** * Test that automatic failover is triggered by shutting the * active NN down. */ @Test(timeout=60000) public void testFailoverAndBackOnNNShutdown() throws Exception { Path p1=new Path("/dir1"); Path p2=new Path("/dir2"); fs.mkdirs(p1); cluster.shutdownNameNode(0); assertTrue(fs.exists(p1)); fs.mkdirs(p2); assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr1.zkfc.getLocalTarget().getAddress()); cluster.restartNameNode(0); waitForHAState(0,HAServiceState.STANDBY); assertTrue(fs.exists(p1)); assertTrue(fs.exists(p2)); cluster.shutdownNameNode(1); waitForHAState(0,HAServiceState.ACTIVE); assertTrue(fs.exists(p1)); assertTrue(fs.exists(p2)); assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr2.zkfc.getLocalTarget().getAddress()); }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDelegationTokensWithHA

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// HDFS-6475: with nn1 active, a token issued by nn1's secret manager must
// resolve; retrieving the password via nn0's (standby) dtSecretManager inside
// the "JobTracker" UGI must fail, and the failure -- pushed through the
// webhdfs ExceptionHandler / JSON round trip -- must unwrap to a
// StandbyException so clients can fail over.
/** * Test if StandbyException can be thrown from StandbyNN, when it's requested for * password. (HDFS-6475). With StandbyException, the client can failover to try * activeNN. */ @Test public void testDelegationTokenStandbyNNAppearFirst() throws Exception { cluster.transitionToStandby(0); cluster.transitionToActive(1); final DelegationTokenSecretManager stSecretManager=NameNodeAdapter.getDtSecretManager(nn1.getNamesystem()); final Token token=getDelegationToken(fs,"JobTracker"); final DelegationTokenIdentifier identifier=new DelegationTokenIdentifier(); byte[] tokenId=token.getIdentifier(); identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId))); assertTrue(null != stSecretManager.retrievePassword(identifier)); final UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker"); ugi.addToken(token); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Object run(){ try { try { byte[] tmppw=dtSecretManager.retrievePassword(identifier); fail("InvalidToken with cause StandbyException is expected" + " since nn0 is standby"); return tmppw; } catch ( IOException e) { throw new SecurityException("Failed to obtain user group information: " + e,e); } } catch ( Exception oe) { HttpServletResponse response=mock(HttpServletResponse.class); ExceptionHandler eh=new ExceptionHandler(); eh.initResponse(response); Response resp=eh.toResponse(oe); Map m=(Map)JSON.parse(resp.getEntity().toString()); RemoteException re=JsonUtil.toRemoteException(m); Exception unwrapped=((RemoteException)re).unwrapRemoteException(StandbyException.class); assertTrue(unwrapped instanceof StandbyException); return null; } } } ); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Exercises token operations across an NN failover: a standby nn0 must reject
// renewDelegationToken with StandbyException; while nn1 is mid-transition to
// active (a background thread triggers the transition, the EditLogTailerForTest
// blocks until "catchup" is flagged), verifyToken must throw either
// StandbyException or RetriableException; once the transition completes,
// renew and cancel must both succeed against the failover client conf.
/** * Test if correct exception (StandbyException or RetriableException) can be * thrown during the NN failover. */ @Test public void testDelegationTokenDuringNNFailover() throws Exception { EditLogTailer editLogTailer=nn1.getNamesystem().getEditLogTailer(); editLogTailer.stop(); Configuration conf=(Configuration)Whitebox.getInternalState(editLogTailer,"conf"); nn1.getNamesystem().setEditLogTailerForTests(new EditLogTailerForTest(nn1.getNamesystem(),conf)); final Token token=getDelegationToken(fs,"JobTracker"); DelegationTokenIdentifier identifier=new DelegationTokenIdentifier(); byte[] tokenId=token.getIdentifier(); identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId))); LOG.info("A valid token should have non-null password, " + "and should be renewed successfully"); assertTrue(null != dtSecretManager.retrievePassword(identifier)); dtSecretManager.renewToken(token,"JobTracker"); cluster.transitionToStandby(0); try { cluster.getNameNodeRpc(0).renewDelegationToken(token); fail("StandbyException is expected since nn0 is in standby state"); } catch ( StandbyException e) { GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(),e); } new Thread(){ @Override public void run(){ try { cluster.transitionToActive(1); } catch ( Exception e) { LOG.error("Transition nn1 to active failed",e); } } } .start(); Thread.sleep(1000); try { nn1.getNamesystem().verifyToken(token.decodeIdentifier(),token.getPassword()); fail("RetriableException/StandbyException is expected since nn1 is in transition"); } catch ( IOException e) { assertTrue(e instanceof StandbyException || e instanceof RetriableException); LOG.info("Got expected exception",e); } catchup=true; synchronized (this) { this.notifyAll(); } Configuration clientConf=dfs.getConf(); doRenewOrCancel(token,clientConf,TokenTestAction.RENEW); doRenewOrCancel(token,clientConf,TokenTestAction.CANCEL); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises delegation-token renewal and cancellation through the DFS API in
 * an HA setting: a freshly issued token has a retrievable password and renews
 * against the secret manager directly; it renews via an HA-configured client
 * conf; it fails to renew with a conf that cannot map the logical nameservice
 * URI; and it still renews and cancels after a failover to the other NN.
 */
@Test
public void testDelegationTokenDFSApi() throws Exception {
  final Token delegationToken = getDelegationToken(fs, "JobTracker");
  DelegationTokenIdentifier tokenIdent = new DelegationTokenIdentifier();
  byte[] rawId = delegationToken.getIdentifier();
  tokenIdent.readFields(new DataInputStream(new ByteArrayInputStream(rawId)));
  LOG.info("A valid token should have non-null password, "
      + "and should be renewed successfully");
  assertTrue(null != dtSecretManager.retrievePassword(tokenIdent));
  dtSecretManager.renewToken(delegationToken, "JobTracker");
  Configuration haClientConf = dfs.getConf();
  doRenewOrCancel(delegationToken, haClientConf, TokenTestAction.RENEW);
  // A conf with no HA mappings cannot resolve the token's logical service.
  Configuration unconfigured = new Configuration();
  try {
    doRenewOrCancel(delegationToken, unconfigured, TokenTestAction.RENEW);
    fail("Did not throw trying to renew with an empty conf!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Unable to map logical nameservice URI", ioe);
  }
  // Swap NN roles and confirm renew/cancel still work after failover.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  doRenewOrCancel(delegationToken, haClientConf, TokenTestAction.RENEW);
  doRenewOrCancel(delegationToken, haClientConf, TokenTestAction.CANCEL);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): verifies HAUtil.cloneDelegationTokenForLogicalUri — after
// cloning, the UGI holds 3 tokens (the logical-URI original plus one clone
// per physical NN address), and each clone is selectable by its IPC service
// and carries the original identifier/password. The second half flips
// setTokenServiceUseIp(false): the existing clones (keyed by IP-based
// services) no longer match host-based service names until the tokens are
// re-cloned. Presumably a teardown elsewhere restores the
// setTokenServiceUseIp global — verify, since it is JVM-wide state.
@Test public void testHAUtilClonesDelegationTokens() throws Exception { final Token token=getDelegationToken(fs,"JobTracker"); UserGroupInformation ugi=UserGroupInformation.createRemoteUser("test"); URI haUri=new URI("hdfs://my-ha-uri/"); token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,HdfsConstants.HDFS_URI_SCHEME)); ugi.addToken(token); Collection nnAddrs=new HashSet(); nnAddrs.add(new InetSocketAddress("localhost",nn0.getNameNodeAddress().getPort())); nnAddrs.add(new InetSocketAddress("localhost",nn1.getNameNodeAddress().getPort())); HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs); Collection> tokens=ugi.getTokens(); assertEquals(3,tokens.size()); LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens)); DelegationTokenSelector dts=new DelegationTokenSelector(); for ( InetSocketAddress addr : nnAddrs) { Text ipcDtService=SecurityUtil.buildTokenService(addr); Token token2=dts.selectToken(ipcDtService,ugi.getTokens()); assertNotNull(token2); assertArrayEquals(token.getIdentifier(),token2.getIdentifier()); assertArrayEquals(token.getPassword(),token2.getPassword()); } SecurityUtilTestHelper.setTokenServiceUseIp(false); for ( InetSocketAddress addr : nnAddrs) { Text ipcDtService=SecurityUtil.buildTokenService(addr); Token token2=dts.selectToken(ipcDtService,ugi.getTokens()); assertNull(token2); } HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs); for ( InetSocketAddress addr : nnAddrs) { Text ipcDtService=SecurityUtil.buildTokenService(addr); Token token2=dts.selectToken(ipcDtService,ugi.getTokens()); assertNotNull(token2); assertArrayEquals(token.getIdentifier(),token2.getIdentifier()); assertArrayEquals(token.getPassword(),token2.getPassword()); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestFailoverWithBlockTokensEnabled

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): a Mockito spy on the DFSClient rewrites every located
// block's token with one whose expiry is Time.now()+10ms — effectively
// expired by the time the read happens — so the read must fail with
// "Could not obtain block". The first read (before the spy is installed)
// proves the file itself is readable. The spy is installed via
// DFSClientAdapter.setDFSClient and is not restored afterwards; presumably
// the cluster is torn down per-test — verify against the class fixture.
@Test public void ensureInvalidBlockTokensAreRejected() throws IOException, URISyntaxException { cluster.transitionToActive(0); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); DFSTestUtil.writeFile(fs,TEST_PATH,TEST_DATA); assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH)); DFSClient dfsClient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs); DFSClient spyDfsClient=Mockito.spy(dfsClient); Mockito.doAnswer(new Answer(){ @Override public LocatedBlocks answer( InvocationOnMock arg0) throws Throwable { LocatedBlocks locatedBlocks=(LocatedBlocks)arg0.callRealMethod(); for ( LocatedBlock lb : locatedBlocks.getLocatedBlocks()) { Token token=lb.getBlockToken(); BlockTokenIdentifier id=lb.getBlockToken().decodeIdentifier(); id.setExpiryDate(Time.now() + 10); Token newToken=new Token(id.getBytes(),token.getPassword(),token.getKind(),token.getService()); lb.setBlockToken(newToken); } return locatedBlocks; } } ).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),Mockito.anyLong(),Mockito.anyLong()); DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyDfsClient); try { assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH)); fail("Shouldn't have been able to read a file with invalid block tokens"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Could not obtain block",ioe); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestFailureOfSharedDir

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): makes the shared edits dir unwritable (chmod -w, recursive),
// waits two resource-check intervals so the NN notices, then asserts the
// standby stays out of safemode while the active NN aborts (ExitException)
// when it tries to roll the edit log against the required-but-unavailable
// journal. The finally block restores +w before shutdown — important,
// otherwise later tests inherit a read-only shared dir. checkExitOnShutdown
// is disabled because the NN is expected to "exit" via ExitException.
// The "finalize log segment 1, 3" message pins exact txids — brittle if the
// startup edit sequence ever changes.
/** * Test that marking the shared edits dir as being "required" causes the NN to * fail if that dir can't be accessed. */ @Test public void testFailureOfSharedDir() throws Exception { Configuration conf=new Configuration(); conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000); MiniDFSCluster cluster=null; File sharedEditsDir=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build(); cluster.waitActive(); cluster.transitionToActive(0); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); assertTrue(fs.mkdirs(new Path("/test1"))); URI sharedEditsUri=cluster.getSharedEditsDir(0,1); sharedEditsDir=new File(sharedEditsUri); assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true)); Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2); NameNode nn1=cluster.getNameNode(1); assertTrue(nn1.isStandbyState()); assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode()); NameNode nn0=cluster.getNameNode(0); try { nn0.getRpcServer().rollEditLog(); fail("Succeeded in rolling edit log despite shared dir being deleted"); } catch ( ExitException ee) { GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee); } for ( URI editsUri : cluster.getNameEditsDirs(0)) { if (editsUri.equals(sharedEditsUri)) { continue; } File editsDir=new File(editsUri.getPath()); File curDir=new File(editsDir,"current"); GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1)); } } finally { if (sharedEditsDir != null) { FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true); } if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Configuring more than one shared edits directory is invalid: resolving the
 * namespace edits dirs must fail with the exact "not yet supported" message.
 */
@Test
public void testMultipleSharedDirsFails() throws Exception {
  final URI firstShared = new URI("file:///shared-A");
  final URI secondShared = new URI("file:///shared-B");
  final URI localDir = new URI("file:///local-A");
  final Configuration config = new Configuration();
  config.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      Joiner.on(",").join(firstShared, secondShared));
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, localDir.toString());
  try {
    FSNamesystem.getNamespaceEditsDirs(config);
    fail("Allowed multiple shared edits directories");
  } catch (IOException ioe) {
    assertEquals("Multiple shared edits directories are not yet supported",
        ioe.getMessage());
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestFailureToReadEdits

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// NOTE(review): causeFailureOnEditLogRead() (defined elsewhere in this
// class) makes the standby unable to read edits past the injected point, so
// catching up must fail with CouldNotCatchUpException (intentionally
// swallowed — the catch is the assertion). With nn0 shut down, nn1's
// transition to active must replay those same unreadable edits and abort
// with ExitException("Error replaying edit log"). The waitForCheckpoint
// txid list {0, 3} pins the expected image txids — presumably fsimage_0 and
// fsimage_3 from the two mkdirs; verify against HATestUtil.
/** * Ensure that the standby fails to become active if it cannot read all * available edits in the shared edits dir when it is transitioning to active * state. */ @Test public void testFailureToReadEditsOnTransitionToActive() throws Exception { assertTrue(fs.mkdirs(new Path(TEST_DIR1))); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3)); causeFailureOnEditLogRead(); assertTrue(fs.mkdirs(new Path(TEST_DIR2))); assertTrue(fs.mkdirs(new Path(TEST_DIR3))); try { HATestUtil.waitForStandbyToCatchUp(nn0,nn1); fail("Standby fully caught up, but should not have been able to"); } catch ( HATestUtil.CouldNotCatchUpException e) { } cluster.shutdownNameNode(0); try { cluster.transitionToActive(1); fail("Standby transitioned to active, but should not have been able to"); } catch ( ExitException ee) { GenericTestUtils.assertExceptionContains("Error replaying edit log",ee); } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// NOTE(review): regression test for double-replay. Edits applied before the
// injected read failure (delete of TEST_DIR1, creation of TEST_DIR2) must be
// visible on the standby, edits after it (TEST_DIR3) must not. If the
// standby re-applied earlier edits after the failure, the delete/ownership
// sequence would diverge. Once the LimitedEditLogAnswer stops throwing,
// catch-up completes and TEST_DIR3 appears while TEST_DIR1 stays deleted.
// The empty catch of CouldNotCatchUpException is deliberate — the exception
// IS the expected outcome.
/** * Test that the standby NN won't double-replay earlier edits if it encounters * a failure to read a later edit. */ @Test public void testFailuretoReadEdits() throws Exception { assertTrue(fs.mkdirs(new Path(TEST_DIR1))); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); fs.setOwner(new Path(TEST_DIR1),"foo","bar"); assertTrue(fs.delete(new Path(TEST_DIR1),true)); assertTrue(fs.mkdirs(new Path(TEST_DIR2))); assertTrue(fs.mkdirs(new Path(TEST_DIR3))); LimitedEditLogAnswer answer=causeFailureOnEditLogRead(); try { HATestUtil.waitForStandbyToCatchUp(nn0,nn1); fail("Standby fully caught up, but should not have been able to"); } catch ( HATestUtil.CouldNotCatchUpException e) { } assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false)); assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir()); assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false)); answer.setThrowExceptionOnRead(false); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false)); assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir()); assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false).isDir()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// NOTE(review): HDFS-2766 regression. After the injected mid-file read
// failure, the SBN checkpoints at txid 5 — a point that does not coincide
// with the end of any finalized log segment — uploads it to the NN, and both
// nodes must still restart cleanly from that image. The final existence
// checks go through a fresh FileSystem bound directly to nn0 (not the
// failover fs) to prove nn0 restarted with the mid-segment checkpoint.
// The empty CouldNotCatchUpException catch is the expected-failure path.
/** * Test the following case: * 1. SBN is reading a finalized edits file when NFS disappears halfway * through (or some intermittent error happens) * 2. SBN performs a checkpoint and uploads it to the NN * 3. NN receives a checkpoint that doesn't correspond to the end of any log * segment * 4. Both NN and SBN should be able to restart at this point. * This is a regression test for HDFS-2766. */ @Test public void testCheckpointStartingMidEditsFile() throws Exception { assertTrue(fs.mkdirs(new Path(TEST_DIR1))); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3)); HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3)); causeFailureOnEditLogRead(); assertTrue(fs.mkdirs(new Path(TEST_DIR2))); assertTrue(fs.mkdirs(new Path(TEST_DIR3))); try { HATestUtil.waitForStandbyToCatchUp(nn0,nn1); fail("Standby fully caught up, but should not have been able to"); } catch ( HATestUtil.CouldNotCatchUpException e) { } HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3,5)); HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5)); cluster.restartNameNode(0); HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5)); FileSystem fs0=null; try { fs0=FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),conf); assertTrue(fs0.exists(new Path(TEST_DIR1))); assertTrue(fs0.exists(new Path(TEST_DIR2))); assertTrue(fs0.exists(new Path(TEST_DIR3))); } finally { if (fs0 != null) fs0.close(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAMetrics

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the HA metrics exposed by each NameNode across failovers: both
 * NNs report "standby" and a positive millis-since-last-loaded-edits before
 * any transition; the active/standby readings flip as the cluster
 * transitions; and pending-DataNode-message counts drain once the standby
 * catches up on edits (which also resets its staleness clock).
 */
@Test
public void testHAMetrics() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  // Effectively disable automatic log rolling so edits stay pending until
  // waitForStandbyToCatchUp forces a roll.
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    FSNamesystem nn0 = cluster.getNamesystem(0);
    FSNamesystem nn1 = cluster.getNamesystem(1);
    // Fix: assertEquals takes (expected, actual); the original reversed the
    // arguments in the next two assertions, producing misleading failure
    // messages. Pass/fail behavior is unchanged; this also matches the
    // argument order used everywhere else in this method.
    assertEquals("standby", nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby", nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    cluster.transitionToActive(0);
    assertEquals("active", nn0.getHAState());
    assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby", nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertEquals("standby", nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("active", nn1.getHAState());
    assertEquals(0, nn1.getMillisSinceLastLoadedEdits());
    Thread.sleep(2000);
    assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
    assertEquals(0, nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    DFSTestUtil.createFile(fs, new Path("/foo"), 10, (short) 1, 1L);
    // Writing through the active (nn1) queues block messages on the standby.
    assertTrue(0 < nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    long millisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1),
        cluster.getNameNode(0));
    assertEquals(0, nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
    // Catching up resets the staleness clock, so the new reading is smaller.
    assertTrue("expected " + millisSinceLastLoadedEdits + " > "
        + newMillisSinceLastLoadedEdits,
        millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
  } finally {
    IOUtils.cleanup(LOG, fs);
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHASafeMode

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * HDFS-3507: DFS#isInSafeMode must consult the active NN in an HA-enabled
 * cluster. Talking directly to a standby NN raises StandbyException (wrapped
 * in RemoteException), while a failover-aware client reflects the active
 * NN's safemode flag for both SAFEMODE_ENTER and SAFEMODE_LEAVE.
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  NameNode standbyNn = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", standbyNn.isStandbyState());
  InetSocketAddress standbyAddr = standbyNn.getNameNodeAddress();
  Configuration clientConf = new Configuration();
  DistributedFileSystem directDfs = new DistributedFileSystem();
  try {
    // Bind a client directly to the standby (bypassing failover logic).
    directDfs.initialize(URI.create("hdfs://" + standbyAddr.getHostName()
        + ":" + standbyAddr.getPort()), clientConf);
    directDfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      IOException unwrapped = ((RemoteException) e).unwrapRemoteException();
      assertTrue("StandBy nn should not support isInSafeMode",
          unwrapped instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != directDfs) {
      directDfs.close();
    }
  }
  // Fail over, force the new active into safemode, and check via failover fs.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem failoverDfs = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", failoverDfs.isInSafeMode());
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", failoverDfs.isInSafeMode());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * HDFS-3921 regression: becoming active while still in startup safemode must
 * not populate the replication queues, so blocks held by a not-yet-reported
 * DataNode are not prematurely counted as missing.
 */
@Test
public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode()
    throws IOException {
  // Lay down 15 blocks at replication 3, then remove one DN so its replicas
  // are unreported when the NN restarts.
  DFSTestUtil.createFile(fs, new Path("/test"), 15 * BLOCK_SIZE, (short) 3, 1L);
  cluster.stopDataNode(1);
  cluster.restartNameNode(0, false);
  cluster.transitionToActive(0);
  // Still in safemode after the transition, and nothing marked missing.
  assertTrue(cluster.getNameNode(0).isInSafeMode());
  assertEquals(0, cluster.getNamesystem(0).getMissingBlocksCount());
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAStateTransitions

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): HDFS-2812 regression. Lease renewal times must be strictly
// increasing across (a) replication to the standby and (b) the failover
// itself — the failover process re-renews leases so they are not mistaken
// for expired. The two Thread.sleep(5) calls ensure the clock advances
// between readings (renewal time has millisecond granularity); -1 encodes
// "no lease" on nn1 before the edits are tailed.
/** * Test for HDFS-2812. Since lease renewals go from the client * only to the active NN, the SBN will have out-of-date lease * info when it becomes active. We need to make sure we don't * accidentally mark the leases as expired when the failover * proceeds. */ @Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception { Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build(); FSDataOutputStream stm=null; FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); NameNode nn0=cluster.getNameNode(0); NameNode nn1=cluster.getNameNode(1); try { cluster.waitActive(); cluster.transitionToActive(0); LOG.info("Starting with NN 0 active"); stm=fs.create(TEST_FILE_PATH); long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR); assertTrue(nn0t0 > 0); long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR); assertEquals("Lease should not yet exist on nn1",-1,nn1t0); Thread.sleep(5); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR); assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0); Thread.sleep(5); LOG.info("Failing over to NN 1"); cluster.transitionToStandby(0); cluster.transitionToActive(1); long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR); assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1); } finally { IOUtils.closeStream(stm); cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Walks HAUtil#isAtLeastOneActive through every single-NN state change:
 * false with both standby, true whenever either NN is active, false again
 * after each transition back to standby. It doubles as a check of
 * {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration,String)}
 * and {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration,String,String)},
 * since the assertions only hold if the proxies point at the correct NNs.
 */
@Test
public void testIsAtLeastOneActive() throws Exception {
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  try {
    Configuration haConf = new HdfsConfiguration();
    HATestUtil.setFailoverConfigurations(miniCluster, haConf);
    List nnProxies = HAUtil.getProxiesForAllNameNodesInNameservice(haConf,
        HATestUtil.getLogicalHostname(miniCluster));
    assertEquals(2, nnProxies.size());
    // Nothing active yet.
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
    miniCluster.transitionToActive(0);
    assertTrue(HAUtil.isAtLeastOneActive(nnProxies));
    miniCluster.transitionToStandby(0);
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
    miniCluster.transitionToActive(1);
    assertTrue(HAUtil.isAtLeastOneActive(nnProxies));
    miniCluster.transitionToStandby(1);
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Flip-flops a single NameNode between active and standby: writes must be
 * rejected while standby ("Operation category WRITE is not supported"), and
 * no edit may be double-played across the transitions — after the final
 * flip-flop the deleted directory must still be gone.
 */
@Test
public void testTransitionActiveToStandby() throws Exception {
  Configuration config = new Configuration();
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1).build();
  try {
    miniCluster.waitActive();
    miniCluster.transitionToActive(0);
    FileSystem fileSys = miniCluster.getFileSystem(0);
    fileSys.mkdirs(TEST_DIR);
    miniCluster.transitionToStandby(0);
    try {
      fileSys.mkdirs(new Path("/x"));
      fail("Didn't throw trying to mutate FS in standby state");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "Operation category WRITE is not supported", t);
    }
    miniCluster.transitionToActive(0);
    // Create then delete; a double-replayed edit would resurrect TEST_DIR.
    DFSTestUtil.createFile(fileSys, new Path(TEST_DIR, "foo"), 10, (short) 1, 1L);
    fileSys.delete(TEST_DIR, true);
    miniCluster.transitionToStandby(0);
    miniCluster.transitionToActive(0);
    assertFalse(fileSys.exists(TEST_DIR));
  } finally {
    miniCluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestPendingCorruptDnMessages

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): verifies that a pending-corrupt-DN message queued on the
// standby (caused by a forced genstamp change) is discarded once the DN
// re-registers with a new storage ID after being wiped. Both polling loops
// (waiting for the message count to reach 1, then for the storage ID to
// change) have no upper bound beyond the JUnit default — consider a timeout.
// The System.out.println looks like leftover debug output; LOG would fit the
// surrounding style better. wipeAndRestartDn/getRegisteredDatanodeUid are
// helpers defined elsewhere in this class.
@Test public void testChangedStorageId() throws IOException, URISyntaxException, InterruptedException { HdfsConfiguration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).nnTopology(MiniDFSNNTopology.simpleHATopology()).build(); try { cluster.transitionToActive(0); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); OutputStream out=fs.create(filePath); out.write("foo bar baz".getBytes()); out.close(); HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),cluster.getNameNode(1)); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,filePath); assertTrue(MiniDFSCluster.changeGenStampOfBlock(0,block,900)); DataNodeProperties dnProps=cluster.stopDataNode(0); cluster.restartNameNode(1,false); assertTrue(cluster.restartDataNode(dnProps,true)); while (cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount() < 1) { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); } assertEquals(1,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount()); String oldStorageId=getRegisteredDatanodeUid(cluster,1); assertTrue(wipeAndRestartDn(cluster,0)); String newStorageId=""; do { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); newStorageId=getRegisteredDatanodeUid(cluster,1); System.out.println("====> oldStorageId: " + oldStorageId + " newStorageId: "+ newStorageId); } while (newStorageId.equals(oldStorageId)); assertEquals(0,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount()); cluster.transitionToStandby(0); cluster.transitionToActive(1); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestPipelinesFailover

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// NOTE(review): order-critical DelayAnswer choreography — the spy holds the
// primary DN's commitBlockSynchronization RPC (waitForCall) while the
// failover happens, then releases it (proceed/waitForResult) so the commit
// lands on the now-standby nn0 and must fail with "Operation category WRITE
// is not supported". loopRecoverLease then retries until a later recovery
// succeeds against the new active. The Thread.sleep(500) after
// transitionToActive(0) is presumably settling time — timing-dependent.
// createFsAsOtherUser avoids the lease soft-limit owner shortcut; helpers
// are defined elsewhere in this class.
/** * Test the scenario where the NN fails over after issuing a block * synchronization request, but before it is committed. The * DN running the recovery should then fail to commit the synchronization * and a later retry will succeed. */ @Test(timeout=30000) public void testFailoverRightBeforeCommitSynchronization() throws Exception { final Configuration conf=new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); FSDataOutputStream stm=null; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build(); try { cluster.waitActive(); cluster.transitionToActive(0); Thread.sleep(500); LOG.info("Starting with NN 0 active"); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); stm=fs.create(TEST_PATH); AppendTestUtil.write(stm,0,BLOCK_SIZE / 2); stm.hflush(); NameNode nn0=cluster.getNameNode(0); ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,TEST_PATH); DatanodeDescriptor expectedPrimary=DFSTestUtil.getExpectedPrimaryNode(nn0,blk); LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary); DataNode primaryDN=cluster.getDataNode(expectedPrimary.getIpcPort()); DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(primaryDN,nn0); DelayAnswer delayer=new DelayAnswer(LOG); Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(Mockito.eq(blk),Mockito.anyInt(),Mockito.anyLong(),Mockito.eq(true),Mockito.eq(false),(DatanodeID[])Mockito.anyObject(),(String[])Mockito.anyObject()); DistributedFileSystem fsOtherUser=createFsAsOtherUser(cluster,conf); assertFalse(fsOtherUser.recoverLease(TEST_PATH)); LOG.info("Waiting for commitBlockSynchronization call from primary"); delayer.waitForCall(); LOG.info("Failing over to NN 1"); cluster.transitionToStandby(0); cluster.transitionToActive(1); delayer.proceed(); delayer.waitForResult(); Throwable t=delayer.getThrown(); if 
(t == null) { fail("commitBlockSynchronization call did not fail on standby"); } GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t); loopRecoverLease(fsOtherUser,TEST_PATH); AppendTestUtil.check(fs,TEST_PATH,BLOCK_SIZE / 2); } finally { IOUtils.closeStream(stm); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestRetryCacheWithHA

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the magic count 23 is the number of retry-cache-able
// operations performed by DFSTestUtil.runOperations — it must be kept in
// sync with that helper if the operation set changes. After rolling the
// active's edit log and explicitly tailing on the standby
// (doTailEdits bypasses the tailer's timer), the standby's retry cache must
// contain exactly the same entries, verified by map-lookup identity of each
// CacheEntry.
/** * 1. Run a set of operations * 2. Trigger the NN failover * 3. Check the retry cache on the original standby NN */ @Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception { DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0); FSNamesystem fsn0=cluster.getNamesystem(0); LightWeightCache cacheSet=(LightWeightCache)fsn0.getRetryCache().getCacheSet(); assertEquals(23,cacheSet.size()); Map oldEntries=new HashMap(); Iterator iter=cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry=iter.next(); oldEntries.put(entry,entry); } cluster.getNameNode(0).getRpcServer().rollEditLog(); cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits(); cluster.shutdownNameNode(0); cluster.transitionToActive(1); FSNamesystem fsn1=cluster.getNamesystem(1); cacheSet=(LightWeightCache)fsn1.getRetryCache().getCacheSet(); assertEquals(23,cacheSet.size()); iter=cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry=iter.next(); assertTrue(oldEntries.containsKey(entry)); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// NOTE(review): HDFS-4591 regression. The DelayAnswer on the spied FSImage
// freezes saveNamespace mid-checkpoint (fireCount==1, resultCount==0 means
// "entered but not returned"), during which the SBN holds the FSNS lock —
// an RPC like getFileInfo must still fail fast with StandbyException rather
// than block. The sleepAtLeastIgnoreInterrupts(1000) gives the RPC a chance
// to arrive while the checkpoint is genuinely in progress —
// timing-dependent. proceed()/waitForResult() then let the checkpoint
// complete (resultCount==1).
/** * Make sure that clients will receive StandbyExceptions even when a * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer * thread will have FSNS lock. Regression test for HDFS-4591. */ @Test(timeout=300000) public void testStandbyExceptionThrownDuringCheckpoint() throws Exception { FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1); DelayAnswer answerer=new DelayAnswer(LOG); Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.eq(NameNodeFile.IMAGE),Mockito.any(Canceler.class)); doEdits(0,1000); nn0.getRpcServer().rollEditLog(); answerer.waitForCall(); assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0); ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); try { nn1.getRpcServer().getFileInfo("/"); fail("Should have thrown StandbyException, but instead succeeded."); } catch ( StandbyException se) { GenericTestUtils.assertExceptionContains("is not supported",se); } assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0); answerer.proceed(); answerer.waitForResult(); assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestINodeFileUnderConstructionWithSnapshot

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): interleaves appendFileWithoutClosing (helper defined
// elsewhere) with snapshot creation so each snapshot captures an
// under-construction file. Each hsync(UPDATE_LENGTH) makes the appended
// length visible to the NN before the snapshot. The assertions pin the file
// size *as seen through each snapshot's diff* (computeFileSize(snapshotId))
// against the growing current size — order of append/snapshot/close here is
// the whole point; do not reorder. The last assertion checks s2 still sees
// 3*BLOCKSIZE even after a further append-and-close, because the diff was
// recorded at snapshot time.
/** * Test snapshot during file appending, before the corresponding{@link FSDataOutputStream} instance closes. */ @Test(timeout=60000) public void testSnapshotWhileAppending() throws Exception { Path file=new Path(dir,"file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); SnapshotTestHelper.createSnapshot(hdfs,dir,"s0"); out.close(); INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString()); assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize()); INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory(); DirectoryDiff last=dirNode.getDiffs().getLast(); out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); dirNode=fsdir.getINode(dir.toString()).asDirectory(); assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize(last.getSnapshotId())); hdfs.createSnapshot(dir,"s1"); out.close(); fileNode=(INodeFile)fsdir.getINode(file.toString()); dirNode=fsdir.getINode(dir.toString()).asDirectory(); last=dirNode.getDiffs().getLast(); assertTrue(fileNode.isWithSnapshot()); assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId())); hdfs.setReplication(file,(short)(REPLICATION - 1)); out=appendFileWithoutClosing(file,BLOCKSIZE); hdfs.createSnapshot(dir,"s2"); out.close(); assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId())); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): every assertion here depends on the exact append/snapshot
// interleaving: the file grows BLOCKSIZE -> 2*BLOCKSIZE-1 -> 3*BLOCKSIZE-1
// (the last block open/under construction), while each snapshot freezes the
// length observed at its creation. getBlockLocations on a snapshot path must
// clamp to the snapshot length (block count, file length, last-block offset/
// size), and on the live path must report isUnderConstruction()==true with
// an incomplete last block of BLOCKSIZE-1. The (offset=BLOCKSIZE, length=0)
// call checks the zero-length-range case returns exactly one block.
// Left as-is: the statement order is load-bearing throughout.
/** * call DFSClient#callGetBlockLocations(...) for snapshot file. Make sure only * blocks within the size range are returned. */ @Test public void testGetBlockLocations() throws Exception { final Path root=new Path("/"); final Path file=new Path("/file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); SnapshotTestHelper.createSnapshot(hdfs,root,"s1"); final Path fileInSnapshot=SnapshotTestHelper.getSnapshotPath(root,"s1",file.getName()); FileStatus status=hdfs.getFileStatus(fileInSnapshot); assertEquals(BLOCKSIZE,status.getLen()); DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE - 1); status=hdfs.getFileStatus(fileInSnapshot); assertEquals(BLOCKSIZE,status.getLen()); status=hdfs.getFileStatus(file); assertEquals(BLOCKSIZE * 2 - 1,status.getLen()); LocatedBlocks blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot.toString(),0,Long.MAX_VALUE); List blockList=blocks.getLocatedBlocks(); assertEquals(BLOCKSIZE,blocks.getFileLength()); assertEquals(1,blockList.size()); LocatedBlock lastBlock=blocks.getLastLocatedBlock(); assertEquals(0,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE,lastBlock.getBlockSize()); SnapshotTestHelper.createSnapshot(hdfs,root,"s2"); final Path fileInSnapshot2=SnapshotTestHelper.getSnapshotPath(root,"s2",file.getName()); HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); status=hdfs.getFileStatus(fileInSnapshot2); assertEquals(BLOCKSIZE * 2 - 1,status.getLen()); status=hdfs.getFileStatus(file); assertEquals(BLOCKSIZE * 3 - 1,status.getLen()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),0,Long.MAX_VALUE); assertFalse(blocks.isUnderConstruction()); assertTrue(blocks.isLastBlockComplete()); blockList=blocks.getLocatedBlocks(); assertEquals(BLOCKSIZE * 2 - 1,blocks.getFileLength()); assertEquals(2,blockList.size()); lastBlock=blocks.getLastLocatedBlock(); 
assertEquals(BLOCKSIZE,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE,lastBlock.getBlockSize()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),BLOCKSIZE,0); blockList=blocks.getLocatedBlocks(); assertEquals(1,blockList.size()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),file.toString(),0,Long.MAX_VALUE); blockList=blocks.getLocatedBlocks(); assertEquals(3,blockList.size()); assertTrue(blocks.isUnderConstruction()); assertFalse(blocks.isLastBlockComplete()); lastBlock=blocks.getLastLocatedBlock(); assertEquals(BLOCKSIZE * 2,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE - 1,lastBlock.getBlockSize()); out.close(); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestNestedSnapshots

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): fills the directory to SNAPSHOT_LIMIT snapshots (creating a
// file f<s> every `step` snapshots so later snapshots contain earlier files),
// asserts the limit+1'th creation fails, then spot-checks visibility: file
// f<f> exists in snapshot s<s> iff s > f (the file was created at snapshot
// index f, so only later snapshots include it). The verification loop uses
// RANDOM.nextInt(step) strides — note nextInt can return 0, making
// `s+=RANDOM.nextInt(step)` a potential non-advancing (infinite) iteration;
// presumably acceptable in practice but worth a floor of 1. The caught IOE
// is the expected over-limit failure (logged, not rethrown).
/** * Test the snapshot limit of a single snapshottable directory. * @throws Exception */ @Test(timeout=300000) public void testSnapshotLimit() throws Exception { final int step=1000; final String dirStr="/testSnapshotLimit/dir"; final Path dir=new Path(dirStr); hdfs.mkdirs(dir,new FsPermission((short)0777)); hdfs.allowSnapshot(dir); int s=0; for (; s < SNAPSHOT_LIMIT; s++) { final String snapshotName="s" + s; hdfs.createSnapshot(dir,snapshotName); if (s % step == 0) { final Path file=new Path(dirStr,"f" + s); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED); } } try { hdfs.createSnapshot(dir,"s" + s); Assert.fail("Expected to fail to create snapshot, but didn't."); } catch ( IOException ioe) { SnapshotTestHelper.LOG.info("The exception is expected.",ioe); } for (int f=0; f < SNAPSHOT_LIMIT; f+=step) { final String file="f" + f; s=RANDOM.nextInt(step); for (; s < SNAPSHOT_LIMIT; s+=RANDOM.nextInt(step)) { final Path p=SnapshotTestHelper.getSnapshotPath(dir,"s" + s,file); Assert.assertEquals(s > f,hdfs.exists(p)); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Snapshots interact with namespace quota: operations that would grow the
 * namespace past the quota fail with NSQuotaExceededException, including
 * modifications of files already captured in a snapshot.
 */
@Test(timeout=300000)
public void testSnapshotWithQuota() throws Exception {
  final String dirStr = "/testSnapshotWithQuota/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short) 0777));
  hdfs.allowSnapshot(dir);

  // Tight namespace quota so the operations below can trip it.
  final int NS_QUOTA = 6;
  hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);

  final Path foo = new Path(dir, "foo");
  final Path f1 = new Path(foo, "f1");
  DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);

  {
    // A snapshot without an explicit name gets a generated
    // "sYYYYMMDD-HHMMSS.mmm" name, located under the ".snapshot" dir.
    final Path snapshotPath = hdfs.createSnapshot(dir);
    final String snapshotName = snapshotPath.getName();
    Assert.assertTrue("snapshotName=" + snapshotName,
        Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
            snapshotName));
    final Path parent = snapshotPath.getParent();
    Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
    Assert.assertEquals(dir, parent.getParent());
  }

  final Path f2 = new Path(foo, "f2");
  DFSTestUtil.createFile(hdfs, f2, BLOCKSIZE, REPLICATION, SEED);

  // Creating a third file exceeds the quota.
  try {
    final Path f3 = new Path(foo, "f3");
    DFSTestUtil.createFile(hdfs, f3, BLOCKSIZE, REPLICATION, SEED);
    Assert.fail();
  } catch (NSQuotaExceededException e) {
    SnapshotTestHelper.LOG.info("The exception is expected.", e);
  }

  // So does taking another snapshot.
  try {
    hdfs.createSnapshot(dir);
    Assert.fail();
  } catch (NSQuotaExceededException e) {
    SnapshotTestHelper.LOG.info("The exception is expected.", e);
  }

  // Modifying f1 (captured in the snapshot) also fails; here the quota
  // error arrives wrapped in a RemoteException from the RPC layer.
  try {
    hdfs.setPermission(f1, new FsPermission((short) 0));
    Assert.fail();
  } catch (RemoteException e) {
    Assert.assertSame(NSQuotaExceededException.class,
        e.unwrapRemoteException().getClass());
    SnapshotTestHelper.LOG.info("The exception is expected.", e);
  }

  // f2 is not in any snapshot, so this modification succeeds.
  hdfs.setPermission(f2, new FsPermission((short) 0));

  // After raising the quota, snapshot creation and modifications work again.
  hdfs.setQuota(dir, NS_QUOTA + 2, HdfsConstants.QUOTA_DONT_SET);
  hdfs.createSnapshot(dir, "s1");
  hdfs.setPermission(foo, new FsPermission((short) 0444));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link Snapshot#ID_COMPARATOR}.
 */
@Test(timeout=300000)
public void testIdCmp() {
  final PermissionStatus perm = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short) 0));
  final INodeDirectory snapshottable =
      new INodeDirectory(0, DFSUtil.string2Bytes("foo"), perm, 0L);
  snapshottable.addSnapshottableFeature();

  // Deliberate duplicates: equal ids must compare as equal, and the
  // ordering must be consistent across repeated instances.
  final Snapshot[] snapshots = {
      new Snapshot(1, "s1", snapshottable),
      new Snapshot(1, "s1", snapshottable),
      new Snapshot(2, "s2", snapshottable),
      new Snapshot(2, "s2", snapshottable)
  };

  // null equals null, and null sorts after every non-null snapshot.
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for (Snapshot s : snapshots) {
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
    for (Snapshot t : snapshots) {
      // The sign of the id comparison must agree with the sign of
      // comparing the snapshot root names ("s1" vs "s2").
      final int expected =
          s.getRoot().getLocalName().compareTo(t.getRoot().getLocalName());
      final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
      Assert.assertEquals(expected > 0, computed > 0);
      Assert.assertEquals(expected == 0, computed == 0);
      Assert.assertEquals(expected < 0, computed < 0);
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestRenameWithSnapshots

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a snapshottable directory that already has snapshots must fail,
 * while renaming a file out of it leaves a WithName reference behind in the
 * snapshot and a shared WithCount node tracking both references.
 */
@Test(timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short) 0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");

  // The snapshot root itself cannot be moved while it has snapshots.
  try {
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        abcStr + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short) 0777));
  final Path bar = new Path(xyz, "bar");
  // Moving foo out of the snapshottable dir is allowed.
  hdfs.rename(foo, bar);

  // The snapshot copy becomes a WithName reference; the live copy and the
  // snapshot copy share one WithCount with a reference count of 2.
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
  final INodeReference.WithCount withCount =
      (INodeReference.WithCount) fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());

  // Deleting the live copy drops the count back to 1 (snapshot copy only).
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure we clean the whole subtree under a DstReference node after
 * deleting a snapshot.
 * see HDFS-5476.
 */
@Test
public void testCleanDstReference() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  hdfs.mkdirs(bar);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");

  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);

  // Rename foo -> foo2, snapshot, then delete foo2's subtree entirely.
  final Path foo2 = new Path(test, "foo2");
  hdfs.rename(foo, foo2);
  hdfs.createSnapshot(test, "s1");
  hdfs.delete(new Path(foo2, "bar"), true);
  hdfs.delete(foo2, true);

  // The file is still reachable through snapshot s1 until s1 is deleted.
  final Path sfileInBar =
      SnapshotTestHelper.getSnapshotPath(test, "s1", "foo2/bar/file");
  assertTrue(hdfs.exists(sfileInBar));
  hdfs.deleteSnapshot(test, "s1");
  assertFalse(hdfs.exists(sfileInBar));

  restartClusterAndCheckImage(true);

  // After cleanup the bar node visible through s0 must have no live
  // children and an empty single diff.
  final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0", "foo/bar");
  INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
  assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
  List diffList = barNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
  assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir before taking the snapshot.
 *
 * A spied-on dir2 refuses addChild, forcing the rename to fail partway and
 * exercise the undo path; the test then verifies dir1, its snapshot diffs,
 * and dir2 are all back to their pre-rename state.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // Make dir2 reject any addChild so the rename fails after removing foo
  // from dir1, which triggers the undo logic.
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2)
      .addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // dir1 still contains foo, and its single s1 diff records no changes.
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList dir1Children = dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  // foo is still a snapshot-tracked directory with only the s1 diff.
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
  List fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());

  // The snapshot path resolves to the very same inode as the live path
  // (assertSame gives better diagnostics than assertTrue(a == b) and
  // matches the style of the other tests in this class).
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertSame(fooNode, fooNode_s1);

  // The destination never materialized and dir2 is untouched.
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Unit test for HDFS-4842.
 */
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
  fsn.getSnapshotManager().setAllowNestedSnapshots(true);
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);

  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);

  // Snapshots on /test, then delete the file, snapshot dir2 (nested),
  // rename foo into dir1, and delete the middle snapshot s1.
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
  hdfs.delete(file, true);
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  hdfs.deleteSnapshot(test, "s1");

  // s2 was taken after the delete, s0 before it.
  final Path file_s2 =
      SnapshotTestHelper.getSnapshotPath(dir2, "s2", "foo/bar/file");
  assertFalse(hdfs.exists(file_s2));
  final Path file_s0 =
      SnapshotTestHelper.getSnapshotPath(test, "s0", "dir2/foo/bar/file");
  assertTrue(hdfs.exists(file_s0));

  // dir1's single diff only records the created (renamed-in) foo node.
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString()).asDirectory();
  List dir1DiffList = dir1Node.getDiffs().asList();
  assertEquals(1, dir1DiffList.size());
  List dList = dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
  assertTrue(dList.isEmpty());
  List cList = dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1, cList.size());
  INode cNode = cList.get(0);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertSame(cNode, fooNode);

  // bar under the renamed foo keeps one diff, pinned to snapshot s0,
  // whose deleted list contains "file".
  final Path newbar = new Path(newfoo, bar.getName());
  INodeDirectory barNode = fsdir.getINode4Write(newbar.toString()).asDirectory();
  assertSame(fooNode.asDirectory(), barNode.getParent());
  List barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  DirectoryDiff diff = barDiffList.get(0);
  INodeDirectory testNode = fsdir.getINode4Write(test.toString()).asDirectory();
  Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertEquals(s0.getId(), diff.getSnapshotId());
  assertEquals("file",
      diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());

  // dir2's diff records the renamed-away foo as a WithName reference that
  // shares its referred inode with the live DstReference copy.
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
  List dir2DiffList = dir2Node.getDiffs().asList();
  assertEquals(1, dir2DiffList.size());
  dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
  assertEquals(1, dList.size());
  final Path foo_s2 =
      SnapshotTestHelper.getSnapshotPath(dir2, "s2", foo.getName());
  INodeReference.WithName fooNode_s2 =
      (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
  assertSame(dList.get(0), fooNode_s2);
  assertSame(fooNode.asReference().getReferredINode(),
      fooNode_s2.getReferredINode());

  restartClusterAndCheckImage(true);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir after taking the snapshot.
 *
 * A spied-on dir2 refuses addChild, so the rename fails and must undo; since
 * foo was created AFTER snapshot s1, it reappears in dir1's CREATED diff
 * rather than being part of the snapshot itself.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // foo/bar created after s1 was taken.
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  // Force the destination directory to reject addChild.
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2)
      .addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // After undo, dir1 contains foo again; the s1 diff lists foo as CREATED
  // (it did not exist when the snapshot was taken).
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList dir1Children = dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  // assertSame over assertTrue(a == b): same check, better failure output,
  // consistent with sibling tests in this class.
  assertSame(fooNode, childrenDiff.getList(ListType.CREATED).get(0));

  // foo is not visible in s1, and the destination never materialized.
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));
  assertFalse(hdfs.exists(newfoo));

  // dir2 is untouched by the failed rename.
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test rename a dir and a file multiple times across snapshottable
 * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Only create snapshots in the beginning (before the rename).
 */
@Test
public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo_dir1 = new Path(sdir1, "foo");
  final Path bar1_dir1 = new Path(foo_dir1, "bar1");
  final Path bar2_dir1 = new Path(sdir1, "bar");
  DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
  DFSTestUtil.createFile(hdfs, bar2_dir1, BLOCKSIZE, REPL, SEED);

  // All snapshots are taken up front; s2/s3 therefore never see the files.
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");

  // First hop: dir1 -> dir2, then bump replication on the moved files.
  final Path foo_dir2 = new Path(sdir2, "foo");
  hdfs.rename(foo_dir1, foo_dir2);
  final Path bar2_dir2 = new Path(sdir2, "bar");
  hdfs.rename(bar2_dir1, bar2_dir2);
  restartClusterAndCheckImage(true);
  final Path bar1_dir2 = new Path(foo_dir2, "bar1");
  hdfs.setReplication(bar1_dir2, REPL_1);
  hdfs.setReplication(bar2_dir2, REPL_1);

  final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
  final Path bar2_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
  final Path bar1_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo/bar1");
  final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "bar");
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar2_s1));
  assertFalse(hdfs.exists(bar1_s2));
  assertFalse(hdfs.exists(bar2_s2));
  // s1 still reports the original replication; the live paths the new one.
  FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_dir2);
  assertEquals(REPL_1, statusBar1.getReplication());
  FileStatus statusBar2 = hdfs.getFileStatus(bar2_s1);
  assertEquals(REPL, statusBar2.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_dir2);
  assertEquals(REPL_1, statusBar2.getReplication());

  // Second hop: dir2 -> dir3, change replication again.
  final Path foo_dir3 = new Path(sdir3, "foo");
  hdfs.rename(foo_dir2, foo_dir3);
  final Path bar2_dir3 = new Path(sdir3, "bar");
  hdfs.rename(bar2_dir2, bar2_dir3);
  restartClusterAndCheckImage(true);
  final Path bar1_dir3 = new Path(foo_dir3, "bar1");
  hdfs.setReplication(bar1_dir3, REPL_2);
  hdfs.setReplication(bar2_dir3, REPL_2);

  final Path bar1_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "foo/bar1");
  final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "bar");
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar2_s1));
  assertFalse(hdfs.exists(bar1_s2));
  assertFalse(hdfs.exists(bar2_s2));
  assertFalse(hdfs.exists(bar1_s3));
  assertFalse(hdfs.exists(bar2_s3));
  statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_dir3);
  assertEquals(REPL_2, statusBar1.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_s1);
  assertEquals(REPL, statusBar2.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_dir3);
  assertEquals(REPL_2, statusBar2.getReplication());

  // Third hop: back to dir2, restore the original replication.
  hdfs.rename(foo_dir3, foo_dir2);
  hdfs.rename(bar2_dir3, bar2_dir2);
  restartClusterAndCheckImage(true);
  hdfs.setReplication(bar1_dir2, REPL);
  hdfs.setReplication(bar2_dir2, REPL);
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar2_s1));
  assertFalse(hdfs.exists(bar1_s2));
  assertFalse(hdfs.exists(bar2_s2));
  assertFalse(hdfs.exists(bar1_s3));
  assertFalse(hdfs.exists(bar2_s3));
  statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_dir2);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_s1);
  assertEquals(REPL, statusBar2.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_dir2);
  assertEquals(REPL, statusBar2.getReplication());

  // Final hop: back to dir1. The live inodes are now DstReferences sharing
  // a WithCount (count 2: live copy + the s1 WithName copy), and every
  // diff list has collapsed to the single s1 entry.
  hdfs.rename(foo_dir2, foo_dir1);
  hdfs.rename(bar2_dir2, bar2_dir1);
  INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
  INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
  assertEquals(2, fooWithCount.getReferenceCount());
  INodeDirectory foo = fooWithCount.asDirectory();
  assertEquals(1, foo.getDiffs().asList().size());
  INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
  Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
  INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
  assertEquals(1, bar1.getDiffs().asList().size());
  assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
  INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString()).asReference();
  INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
  assertEquals(2, barWithCount.getReferenceCount());
  INodeFile bar = barWithCount.asFile();
  assertEquals(1, bar.getDiffs().asList().size());
  assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
  restartClusterAndCheckImage(true);

  // Delete the live copies; only the s1 snapshot copies remain, so each
  // WithCount drops to reference count 1.
  hdfs.delete(foo_dir1, true);
  hdfs.delete(bar2_dir1, true);
  restartClusterAndCheckImage(true);
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar2_s1));
  assertFalse(hdfs.exists(bar1_s2));
  assertFalse(hdfs.exists(bar2_s2));
  assertFalse(hdfs.exists(bar1_s3));
  assertFalse(hdfs.exists(bar2_s3));
  assertFalse(hdfs.exists(foo_dir1));
  assertFalse(hdfs.exists(bar1_dir1));
  assertFalse(hdfs.exists(bar2_dir1));
  statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar2 = hdfs.getFileStatus(bar2_s1);
  assertEquals(REPL, statusBar2.getReplication());
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  fooRef = fsdir.getINode(foo_s1.toString()).asReference();
  fooWithCount = (WithCount) fooRef.getReferredINode();
  assertEquals(1, fooWithCount.getReferenceCount());
  barRef = fsdir.getINode(bar2_s1.toString()).asReference();
  barWithCount = (WithCount) barRef.getReferredINode();
  assertEquals(1, barWithCount.getReferenceCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the undo section of the second-time rename.
 *
 * foo is first renamed into dir2 successfully, then a rename into a
 * spied-on dir3 (which rejects addChild) fails and must be undone —
 * restoring foo2 in dir2 together with dir2's snapshot diff bookkeeping.
 */
@Test
public void testRenameUndo_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // dir3 rejects addChild, so the second rename will fail and undo.
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  doReturn(false).when(mockDir3)
      .addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());

  final Path foo_dir2 = new Path(sdir2, "foo2");
  final Path foo_dir3 = new Path(sdir3, "foo3");
  hdfs.rename(foo, foo_dir2);
  boolean result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);

  // After the undo, dir2 still holds foo2; its s2 diff lists foo2 as
  // CREATED (foo2 arrived after s2 was taken).
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  ReadOnlyList dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  List dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(1, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
  // assertSame over assertTrue(a == b): same check, better failure output,
  // consistent with sibling tests in this class.
  assertSame(fooNode, childrenDiff.getList(ListType.CREATED).get(0));
  assertTrue(fooNode instanceof INodeReference.DstReference);
  List fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());

  // Take s3 and fail the rename again: now foo2 is captured in s3, so the
  // undo must preserve both diff entries.
  hdfs.createSnapshot(sdir2, "s3");
  result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);
  dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
  fooNode = fsdir.getINode4Write(foo_dir2.toString());
  dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(2, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
  childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  assertSame(fooNode, childrenDiff.getList(ListType.CREATED).get(0));
  childrenDiff = dir2Diffs.get(1).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  // foo2 is visible in s3 but still not in s2.
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  assertTrue(hdfs.exists(foo_s3));
  assertTrue(fooNode instanceof INodeReference.DstReference);
  fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(2, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}

UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // Rename foo2 into dir3 so the future overwrite target is a reference.
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);
  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  // Spy on dir3: the first (non-null) addChild fails, later calls behave
  // normally — forcing the overwrite-rename to fail and run its undo.
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  doReturn(false).when(mockDir3)
      .addChild((INode) Mockito.isNull(), anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "rename from " + foo + " to " + foo3 + " failed.", e);
  }

  // The undo must restore the original reference node, its WithCount
  // (still 2 references), and the parent-reference linkage.
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc =
      (WithCount) foo3Node.asReference().getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // foo -> dir2, add bar2/bar3, snapshot s3, move foo back, drop s3.
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sdir2, "s3");
  hdfs.rename(foo2, foo);
  hdfs.deleteSnapshot(sdir2, "s3");

  // Namespace accounting after the round trip and snapshot deletion.
  final INodeDirectory dir1Node =
      fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(9, q1.get(Quota.NAMESPACE));
  final INodeDirectory dir2Node =
      fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(2, q2.get(Quota.NAMESPACE));

  // The s1 snapshot copy of foo is a WithName reference sharing a
  // WithCount (count 2) with the live DstReference copy.
  final Path foo_s1 =
      SnapshotTestHelper.getSnapshotPath(sdir1, "s1", foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2, wc.getReferenceCount());

  // foo still holds all three children; only the s3 data under the renamed
  // dir was removed, not bar2/bar3 themselves.
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList children = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // The live foo is the DstReference side of the same WithCount.
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 =
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  restartClusterAndCheckImage(true);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 */
@Test
public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo_dir1 = new Path(sdir1, "foo");
  final Path bar1_dir1 = new Path(foo_dir1, "bar1");
  final Path bar_dir1 = new Path(sdir1, "bar");
  DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
  DFSTestUtil.createFile(hdfs, bar_dir1, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");

  // Hop 1: dir1 -> dir2, set replication REPL_1, snapshot s11/s22/s33.
  final Path foo_dir2 = new Path(sdir2, "foo");
  hdfs.rename(foo_dir1, foo_dir2);
  final Path bar_dir2 = new Path(sdir2, "bar");
  hdfs.rename(bar_dir1, bar_dir2);
  final Path bar1_dir2 = new Path(foo_dir2, "bar1");
  hdfs.setReplication(bar1_dir2, REPL_1);
  hdfs.setReplication(bar_dir2, REPL_1);
  restartClusterAndCheckImage(true);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s22");
  SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s33");

  // Hop 2: dir2 -> dir3, set replication REPL_2, snapshot s111/s222/s333.
  final Path foo_dir3 = new Path(sdir3, "foo");
  hdfs.rename(foo_dir2, foo_dir3);
  final Path bar_dir3 = new Path(sdir3, "bar");
  hdfs.rename(bar_dir2, bar_dir3);
  final Path bar1_dir3 = new Path(foo_dir3, "bar1");
  hdfs.setReplication(bar1_dir3, REPL_2);
  hdfs.setReplication(bar_dir3, REPL_2);
  restartClusterAndCheckImage(true);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s222");
  SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s333");

  // Each snapshot taken while the files lived in its tree sees them, with
  // the replication value in effect at snapshot time.
  final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
  final Path bar1_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "foo/bar1");
  final Path bar1_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "foo/bar1");
  final Path bar_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
  final Path bar_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "bar");
  final Path bar_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "bar");
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar1_s22));
  assertTrue(hdfs.exists(bar1_s333));
  assertTrue(hdfs.exists(bar_s1));
  assertTrue(hdfs.exists(bar_s22));
  assertTrue(hdfs.exists(bar_s333));
  FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_dir3);
  assertEquals(REPL_2, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_s22);
  assertEquals(REPL_1, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_s333);
  assertEquals(REPL_2, statusBar1.getReplication());
  FileStatus statusBar = hdfs.getFileStatus(bar_s1);
  assertEquals(REPL, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_dir3);
  assertEquals(REPL_2, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_s22);
  assertEquals(REPL_1, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_s333);
  assertEquals(REPL_2, statusBar.getReplication());

  // Hop 3: dir3 -> dir2 again, restore REPL, snapshot s1111/s2222.
  hdfs.rename(foo_dir3, foo_dir2);
  hdfs.rename(bar_dir3, bar_dir2);
  hdfs.setReplication(bar1_dir2, REPL);
  hdfs.setReplication(bar_dir2, REPL);
  restartClusterAndCheckImage(true);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2222");

  final Path bar1_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo/bar1");
  final Path bar_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "bar");
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar1_s22));
  assertTrue(hdfs.exists(bar1_s333));
  assertTrue(hdfs.exists(bar1_s2222));
  assertTrue(hdfs.exists(bar_s1));
  assertTrue(hdfs.exists(bar_s22));
  assertTrue(hdfs.exists(bar_s333));
  assertTrue(hdfs.exists(bar_s2222));
  statusBar1 = hdfs.getFileStatus(bar1_s1);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_dir2);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_s22);
  assertEquals(REPL_1, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_s333);
  assertEquals(REPL_2, statusBar1.getReplication());
  statusBar1 = hdfs.getFileStatus(bar1_s2222);
  assertEquals(REPL, statusBar1.getReplication());
  statusBar = hdfs.getFileStatus(bar_s1);
  assertEquals(REPL, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_dir2);
  assertEquals(REPL, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_s22);
  assertEquals(REPL_1, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_s333);
  assertEquals(REPL_2, statusBar.getReplication());
  statusBar = hdfs.getFileStatus(bar_s2222);
  assertEquals(REPL, statusBar.getReplication());

  // Final hop: back to dir1. Each rename that crossed a snapshot boundary
  // added a reference: live copy + s1, s22, s333, s2222 = count 5, with
  // four diffs ordered s1 < s22 < s333 < s2222.
  hdfs.rename(foo_dir2, foo_dir1);
  hdfs.rename(bar_dir2, bar_dir1);
  INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
  INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
  INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
  assertEquals(5, fooWithCount.getReferenceCount());
  INodeDirectory foo = fooWithCount.asDirectory();
  List fooDiffs = foo.getDiffs().asList();
  assertEquals(4, fooDiffs.size());
  Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
  Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
  Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
  Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
  assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
  assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
  List bar1Diffs = bar1.getDiffs().asList();
  assertEquals(3, bar1Diffs.size());
  assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
  assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
  assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
  INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString()).asReference();
  INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
  assertEquals(5, barWithCount.getReferenceCount());
  INodeFile bar = barWithCount.asFile();
  List barDiffs = bar.getDiffs().asList();
  assertEquals(4, barDiffs.size());
  assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
  assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
  assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
  assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
  restartClusterAndCheckImage(true);

  // Delete the live copies: the s1111 snapshot (taken while the files were
  // in dir2) never saw them under dir1, so only the four older snapshot
  // references remain.
  hdfs.delete(foo_dir1, true);
  hdfs.delete(bar_dir1, true);
  restartClusterAndCheckImage(true);
  final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "foo/bar1");
  final Path bar_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "bar");
  assertTrue(hdfs.exists(bar1_s1));
  assertTrue(hdfs.exists(bar1_s22));
  assertTrue(hdfs.exists(bar1_s333));
  assertTrue(hdfs.exists(bar1_s2222));
  assertFalse(hdfs.exists(bar1_s1111));
  assertTrue(hdfs.exists(bar_s1));
  assertTrue(hdfs.exists(bar_s22));
  assertTrue(hdfs.exists(bar_s333));
  assertTrue(hdfs.exists(bar_s2222));
  assertFalse(hdfs.exists(bar_s1111));
  final Path foo_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo");
  fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
  fooWithCount = (WithCount) fooRef.getReferredINode();
  assertEquals(4, fooWithCount.getReferenceCount());
  foo = fooWithCount.asDirectory();
  fooDiffs = foo.getDiffs().asList();
  assertEquals(4, fooDiffs.size());
  assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
  bar1Diffs = bar1.getDiffs().asList();
  assertEquals(3, bar1Diffs.size());
  assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
  barRef = fsdir.getINode(bar_s2222.toString()).asReference();
  barWithCount = (WithCount) barRef.getReferredINode();
  assertEquals(4, barWithCount.getReferenceCount());
  bar = barWithCount.asFile();
  barDiffs = bar.getDiffs().asList();
  assertEquals(4, barDiffs.size());
  assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // Set a small namespace quota on dir2 so the rename below fails and the
  // NameNode must undo it.
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  final Path foo2 = new Path(subdir2, foo.getName());
  final boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);

  // After the failed rename the source tree must be intact.
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));

  final INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  final INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  final INode barNode = fsdir.getINode4Write(bar.toString());
  // bar must have been restored as a plain INodeFile, not a reference.
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());

  // dir1 must record no children change for the undone rename.
  List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  final INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  final Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(3, counts.get(Quota.NAMESPACE));
  assertEquals(0, counts.get(Quota.DISKSPACE));
  childrenList = ReadOnlyList.Util.asList(dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  final INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir.getINode4Write(subdir2.toString()));

  // dir2 must record no children change either.
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // Create files under the renamed dir, snapshot the dst tree, then delete
  // both the renamed dir and the snapshot that captured it.
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sdir2, "s3");
  hdfs.delete(foo2, true);
  hdfs.deleteSnapshot(sdir2, "s3");

  // Quota usage must show everything created after the rename is destroyed.
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(4, q1.get(Quota.NAMESPACE));
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(2, q2.get(Quota.NAMESPACE));

  // foo is now only referenced from snapshot s1 of sdir1.
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = (WithCount) fooRef.asReference()
      .getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());

  // Only the diff recorded for s1 should remain on foo.
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  restartClusterAndCheckImage(true);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test the rename undo when quota of dst tree is exceeded after rename. */ @Test public void testRenameExceedQuota() throws Exception { final Path test=new Path("/test"); final Path dir1=new Path(test,"dir1"); final Path dir2=new Path(test,"dir2"); final Path sub_dir2=new Path(dir2,"subdir"); final Path subfile_dir2=new Path(sub_dir2,"subfile"); hdfs.mkdirs(dir1); DFSTestUtil.createFile(hdfs,subfile_dir2,BLOCKSIZE,REPL,SEED); final Path foo=new Path(dir1,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1); hdfs.rename(foo,subfile_dir2,Rename.OVERWRITE); INode dir2Node=fsdir.getINode4Write(dir2.toString()); assertTrue(dir2Node.asDirectory().isSnapshottable()); Quota.Counts counts=dir2Node.computeQuotaUsage(); assertEquals(7,counts.get(Quota.NAMESPACE)); assertEquals(BLOCKSIZE * REPL * 2,counts.get(Quota.DISKSPACE)); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // A quota of 4 makes recording the overwrite modification in snapshots
  // fail, forcing the rename to be undone.
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (QuotaExceededException e) {
    String msg = "Failed to record modification for snapshot: "
        + "The NameSpace quota (directories and files)"
        + " is exceeded: quota=4 file count=5";
    GenericTestUtils.assertExceptionContains(msg, e);
  }
  assertTrue(hdfs.exists(foo));

  final INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  final INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());

  // dir1 must record no children change for the undone rename.
  List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  final INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  final Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(4, counts.get(Quota.NAMESPACE));
  assertEquals(0, counts.get(Quota.DISKSPACE));
  childrenList = ReadOnlyList.Util.asList(dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  final INode subdir2Node = childrenList.get(0);
  assertTrue(subdir2Node.asDirectory().isWithSnapshot());
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir.getINode4Write(sub_dir2.toString()));

  // The overwrite target must have been restored as a plain directory.
  final INode subsubdir2Node = fsdir.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());

  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  diffList = subdir2Node.asDirectory().getDiffs().asList();
  assertEquals(0, diffList.size());
}

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/** * This test demonstrates that {@link INodeDirectory#removeChild(INode,Snapshot)}and {@link INodeDirectory#addChild(INode,boolean,Snapshot)}should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the * added/removed child should be recorded in snapshots. */ @Test public void testRenameDirAndDeleteSnapshot_5() throws Exception { final Path dir1=new Path("/dir1"); final Path dir2=new Path("/dir2"); final Path dir3=new Path("/dir3"); hdfs.mkdirs(dir1); hdfs.mkdirs(dir2); hdfs.mkdirs(dir3); final Path foo=new Path(dir1,"foo"); hdfs.mkdirs(foo); SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); hdfs.deleteSnapshot(dir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); final Path foo2=new Path(dir2,foo.getName()); hdfs.rename(foo,foo2); final Path bar2=new Path(dir2,"foo/bar"); final Path bar3=new Path(dir3,"bar"); hdfs.rename(bar2,bar3); hdfs.delete(foo2,true); assertTrue(hdfs.exists(bar3)); INodeFile barNode=(INodeFile)fsdir.getINode4Write(bar3.toString()); assertSame(fsdir.getINode4Write(dir3.toString()),barNode.getParent()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Test rename where the src/dst directories are both snapshottable * directories without snapshots. In such case we need to update the * snapshottable dir list in SnapshotManager. */ @Test(timeout=60000) public void testRenameAndUpdateSnapshottableDirs() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(sdir2,"bar"); hdfs.mkdirs(foo); hdfs.mkdirs(bar); hdfs.allowSnapshot(foo); SnapshotTestHelper.createSnapshot(hdfs,bar,snap1); assertEquals(2,fsn.getSnapshottableDirListing().length); INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory(); long fooId=fooNode.getId(); try { hdfs.rename(foo,bar,Rename.OVERWRITE); fail("Expect exception since " + bar + " is snapshottable and already has snapshots"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots",e); } hdfs.deleteSnapshot(bar,snap1); hdfs.rename(foo,bar,Rename.OVERWRITE); SnapshottableDirectoryStatus[] dirs=fsn.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals(bar,dirs[0].getFullPath()); assertEquals(fooId,dirs[0].getDirStatus().getFileId()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** * Test rename to an invalid name (xxx/.snapshot) */ @Test public void testRenameUndo_7() throws Exception { final Path root=new Path("/"); final Path foo=new Path(root,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,root,snap1); final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR); try { hdfs.rename(bar,invalid); fail("expect exception since invalid name is used for rename"); } catch ( Exception e) { GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e); } INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory(); ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,children.size()); List diffList=fooNode.getDiffs().asList(); assertEquals(1,diffList.size()); DirectoryDiff diff=diffList.get(0); Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1)); assertEquals(s1.getId(),diff.getSnapshotId()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile(); assertSame(barNode,children.get(0)); assertSame(fooNode,barNode.getParent()); List barDiffList=barNode.getDiffs().asList(); assertEquals(1,barDiffList.size()); FileDiff barDiff=barDiffList.get(0); assertEquals(s1.getId(),barDiff.getSnapshotId()); hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); hdfs.saveNamespace(); hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build(); cluster.waitActive(); restartClusterAndCheckImage(true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Rename a single file across snapshottable dirs. */ @Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir2,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); hdfs.createSnapshot(sdir1,"s3"); final Path newfoo=new Path(sdir1,"foo"); hdfs.rename(foo,newfoo); hdfs.setReplication(newfoo,REPL_1); final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo"); assertTrue(hdfs.exists(foo_s2)); FileStatus status=hdfs.getFileStatus(foo_s2); assertEquals(REPL,status.getReplication()); final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo"); assertFalse(hdfs.exists(foo_s3)); INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory(); Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); INodeFile sfoo=fsdir.getINode(newfoo.toString()).asFile(); assertEquals(s2.getId(),sfoo.getDiffs().getLastSnapshotId()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After the following steps:
 * <pre>
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 * </pre>
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000)
public void testRenameDirAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path bar2 = new Path(foo, "bar2");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // Modify foo's subtree after s2 so foo carries snapshot diffs.
  hdfs.setReplication(bar2, REPL_1);
  hdfs.delete(bar, true);
  hdfs.createSnapshot(sdir1, "s3");

  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);

  // bar was deleted after s2, so it remains visible through s2.
  final Path snapshotBar = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo/bar");
  assertTrue(hdfs.exists(snapshotBar));

  final Path newBar2 = new Path(newfoo, "bar2");
  assertTrue(hdfs.exists(newBar2));
  hdfs.delete(newBar2, true);

  // The s2 copy of bar2 keeps its pre-modification replication factor.
  final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo/bar2");
  assertTrue(hdfs.exists(bar2_s2));
  final FileStatus status = hdfs.getFileStatus(bar2_s2);
  assertEquals(REPL, status.getReplication());
  // s3 on /dir1 predates the rename, so bar2 is absent there.
  final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After rename, delete the snapshot in src
 */
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");

  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  restartClusterAndCheckImage(true);

  // Create bar2 after the rename, snapshot it in s4, then delete the whole
  // renamed dir; both files must stay visible through s4.
  final Path bar2 = new Path(newfoo, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.delete(newfoo, true);
  final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar2");
  assertTrue(hdfs.exists(bar2_s4));
  final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s4));

  hdfs.deleteSnapshot(sdir1, "s4");
  restartClusterAndCheckImage(true);

  // bar existed before the rename, so it is visible in sdir2's s3 but not
  // sdir1's s3; bar2 was created after all s3 snapshots.
  Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
  assertFalse(hdfs.exists(bar_s3));
  bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
  assertTrue(hdfs.exists(bar_s3));
  Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
  bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));

  hdfs.deleteSnapshot(sdir2, "s3");
  final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s2));

  // foo in s2 is now a WithName reference with a single remaining diff.
  final INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString())
      .asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
  assertEquals(1, fooWC.getReferenceCount());
  INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
  List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
  assertEquals(1, diffs.size());
  assertEquals(s2.getId(), diffs.get(0).getSnapshotId());

  restartClusterAndCheckImage(true);

  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(bar_s2));
  restartClusterAndCheckImage(true);

  // Only the two snapshottable dirs plus root and the s1 copy remain.
  Quota.Counts q = fsdir.getRoot().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(4, q.get(Quota.NAMESPACE));
  assertEquals(0, q.get(Quota.DISKSPACE));
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
  q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q.get(Quota.NAMESPACE));
  assertEquals(0, q.get(Quota.DISKSPACE));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRenameWithOverWrite() throws Exception { final Path root=new Path("/"); final Path foo=new Path(root,"foo"); final Path file1InFoo=new Path(foo,"file1"); final Path file2InFoo=new Path(foo,"file2"); final Path file3InFoo=new Path(foo,"file3"); DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED); DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED); DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED); final Path bar=new Path(root,"bar"); hdfs.mkdirs(bar); SnapshotTestHelper.createSnapshot(hdfs,root,"s0"); final Path fileInBar=new Path(bar,"file1"); hdfs.rename(file1InFoo,fileInBar); final Path newDir=new Path(root,"newDir"); hdfs.rename(bar,newDir); final Path file2InNewDir=new Path(newDir,"file2"); hdfs.rename(file2InFoo,file2InNewDir); final Path file1InNewDir=new Path(newDir,"file1"); hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE); SnapshotTestHelper.createSnapshot(hdfs,root,"s1"); SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1"); LOG.info("DiffList is \n\"" + report.toString() + "\""); List entries=report.getDiffList(); assertEquals(7,entries.size()); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null)); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null)); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null)); assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null)); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir")); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2")); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1")); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test renaming a file and then delete snapshots.
 */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");

  // Rename the file into sdir1, then change its replication twice with a
  // snapshot (s4) taken in between.
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  hdfs.setReplication(newfoo, REPL_1);
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.setReplication(newfoo, REPL_2);

  FileStatus status = hdfs.getFileStatus(newfoo);
  assertEquals(REPL_2, status.getReplication());
  final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());

  hdfs.createSnapshot(sdir1, "s5");
  final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
  status = hdfs.getFileStatus(foo_s5);
  assertEquals(REPL_2, status.getReplication());

  // Delete s5 and s4; each deletion must remove only its own snapshot copy.
  hdfs.deleteSnapshot(sdir1, "s5");
  restartClusterAndCheckImage(true);
  assertFalse(hdfs.exists(foo_s5));
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());
  hdfs.deleteSnapshot(sdir1, "s4");
  assertFalse(hdfs.exists(foo_s4));

  // s3 was taken before the rename, so the file is not under it in either
  // tree; s2 still holds the copy with the original replication.
  Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  assertTrue(hdfs.exists(foo_s2));
  status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());

  // The file should retain exactly one diff, associated with s2.
  final INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(1, snode.getDiffs().asList().size());
  final INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString())
      .asDirectory();
  final Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
  restartClusterAndCheckImage(true);

  // Delete the remaining snapshots, checking image consistency each time.
  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(foo_s2));
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s3");
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a directory after a snapshot should surface in the snapshot
 * diff report as a RENAME entry (not a delete + create pair).
 */
@Test(timeout=60000)
public void testRenameDirectoryInSnapshot() throws Exception {
  final Path sub2 = new Path(sub1, "sub2");
  final Path sub3 = new Path(sub1, "sub3");
  final Path sub2file1 = new Path(sub2, "sub2file1");
  final String sub1snap1 = "sub1snap1";
  hdfs.mkdirs(sub1);
  hdfs.mkdirs(sub2);
  DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1);
  // Rename the directory after the snapshot was taken.
  hdfs.rename(sub2, sub3);
  // Empty "to" snapshot name means: diff against the current state.
  SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, sub1snap1, "");
  LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
  List entries = diffReport.getDiffList();
  // Exactly two entries expected: the root dir modified plus the rename.
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(), sub3.getName()));
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSetQuotaWithSnapshot

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  hdfs.allowSnapshot(dir);
  // Quota changes before any snapshot exists must not create diffs.
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_DONT_SET);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1, HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  // Once a snapshot exists, clearing the quota records a directory diff.
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(1, dirNode.getDiffs().asList().size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertEquals(dir, status[0].getFullPath());
  // Create a subdir, snapshot s2, then add a file; clearing the quota
  // afterwards must leave the subdir with exactly one diff (for s2) whose
  // created-list contains the new file.
  final Path subDir = new Path(dir, "sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir, "s2");
  final Path file = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  List diffList = subNode.asDirectory().getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
  List createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1, createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshot

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
 * they are idempotent.
 */
@Test
public void testAllowAndDisallowSnapshot() throws Exception {
  final Path dir = new Path("/dir");
  final Path file0 = new Path(dir, "file0");
  final Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());
  // allowSnapshot twice: the second call must be a no-op.
  hdfs.allowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  hdfs.allowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  // disallowSnapshot twice: likewise idempotent.
  hdfs.disallowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());
  hdfs.disallowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());
  // The root directory is special: it always stays snapshottable, and
  // allow/disallow only toggle its snapshot quota between the limit and 0.
  final Path root = new Path("/");
  INodeDirectory rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  hdfs.allowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  hdfs.allowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  hdfs.disallowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  hdfs.disallowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotBlocksMap

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier 
/**
 * Test deleting a file with snapshots. Need to check the blocksMap to make
 * sure the corresponding record is updated correctly.
 */
@Test(timeout=60000)
public void testDeletionWithSnapshots() throws Exception {
  Path file0 = new Path(sub1, "file0");
  Path file1 = new Path(sub1, "file1");
  Path sub2 = new Path(sub1, "sub2");
  Path file2 = new Path(sub2, "file2");
  Path file3 = new Path(sub1, "file3");
  Path file4 = new Path(sub1, "file4");
  Path file5 = new Path(sub1, "file5");
  DFSTestUtil.createFile(hdfs, file0, 4 * BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, 2 * BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file2, 3 * BLOCKSIZE, REPLICATION, seed);
  // Deleting a file that is in no snapshot must remove its blocks from
  // the blocksMap immediately.
  {
    final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir, blockmanager);
    BlockInfo[] blocks = f2.getBlocks();
    hdfs.delete(sub2, true);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
  }
  // Interleave file creation with snapshot creation.
  final String[] snapshots = {"s0", "s1", "s2"};
  DFSTestUtil.createFile(hdfs, file3, 5 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
  DFSTestUtil.createFile(hdfs, file4, 1 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
  DFSTestUtil.createFile(hdfs, file5, 7 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
  // A replication change on a snapshotted file keeps it an INodeFile
  // (with the snapshot feature), not a distinct subclass.
  {
    INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
    Assert.assertSame(INodeFile.class, f1.getClass());
    hdfs.setReplication(file1, (short) 2);
    f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
    assertTrue(f1.isWithSnapshot());
    assertFalse(f1.isUnderConstruction());
  }
  // Deleting a file still referenced by a snapshot must NOT remove its
  // blocks from the blocksMap.
  final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir, blockmanager);
  BlockInfo[] blocks0 = f0.getBlocks();
  Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0", file0.getName());
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  hdfs.delete(file0, true);
  for (BlockInfo b : blocks0) {
    assertNotNull(blockmanager.getBlockCollection(b));
  }
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1", file0.getName()).toString();
  assertBlockCollection(s1f0, 4, fsdir, blockmanager);
  // Deleting snapshot s1 must keep the blocks alive because s0 still
  // references the file.
  hdfs.deleteSnapshot(sub1, "s1");
  for (BlockInfo b : blocks0) {
    assertNotNull(blockmanager.getBlockCollection(b));
  }
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  // Resolving the file through the deleted snapshot's path must fail.
  try {
    INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
    fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
  } catch (IOException e) {
    assertExceptionContains("File does not exist: " + s1f0, e);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotDeletion

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs.
 */
@Test(timeout=300000)
public void testDeleteEarliestSnapshot1() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  String snapshotName = "s1";
  // Deleting a snapshot on a dir that is not snapshottable must fail.
  try {
    hdfs.deleteSnapshot(sub, snapshotName);
    fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub, e);
  }
  hdfs.allowSnapshot(sub);
  // Deleting a snapshot that does not exist must fail.
  try {
    hdfs.deleteSnapshot(sub, snapshotName);
    fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for " + sub.toString());
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path " + sub.toString() + ": the snapshot does not exist.", e);
  }
  // Create/delete/re-create the snapshot, verifying quota usage accounting
  // (namespace count, disk consumption) at each step.
  SnapshotTestHelper.createSnapshot(hdfs, sub, snapshotName);
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
  hdfs.deleteSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
  hdfs.createSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
  Path newFile = new Path(sub, "newFile");
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  String snapshotName2 = "s2";
  hdfs.createSnapshot(sub, snapshotName2);
  checkQuotaUsageComputation(sub, 6, BLOCKSIZE * REPLICATION * 3);
  // Deleting the earliest snapshot must not alter how newFile appears in
  // the later snapshot s2.
  Path ss = SnapshotTestHelper.getSnapshotPath(sub, snapshotName2, "newFile");
  FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
  hdfs.deleteSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * REPLICATION * 3);
  FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
  System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n" + "After deletion: " + statusAfterDeletion.toString());
  assertEquals(statusBeforeDeletion.toString(), statusAfterDeletion.toString());
}

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting the earliest (first) snapshot. In this more complicated
 * scenario, the snapshots are taken across directories.
 * <pre>
 * The test covers the following scenarios:
 * 1. delete the first diff in the diff list of a directory
 * 2. delete the first diff in the diff list of a file
 * </pre>
 * Also, the recursive cleanTree process should cover both INodeFile and
 * INodeDirectory.
 */
@Test(timeout=300000)
public void testDeleteEarliestSnapshot2() throws Exception {
  Path noChangeDir = new Path(sub, "noChangeDir");
  Path noChangeFile = new Path(noChangeDir, "noChangeFile");
  Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
  Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
  Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
  DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile toDeleteFileNode = TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
  BlockInfo[] blocks = toDeleteFileNode.getBlocks();
  // Take s0, then delete one file and change metadata on another file and
  // a directory, then take s1.
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
  hdfs.delete(toDeleteFile, true);
  checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
  hdfs.setReplication(metaChangeFile, REPLICATION_1);
  hdfs.setOwner(metaChangeDir, "unknown", "unknown");
  checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
  hdfs.createSnapshot(dir, "s1");
  checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);
  // Deleting s0 (the earliest snapshot) should free the blocks of the
  // file deleted after s0 was taken.
  hdfs.deleteSnapshot(dir, "s0");
  checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }
  // Only snapshot s1 and its single directory diff should remain.
  final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
  Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertNull(snapshot0);
  Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
  DirectoryDiffList diffList = dirNode.getDiffs();
  assertEquals(1, diffList.asList().size());
  assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
  diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
  assertEquals(0, diffList.asList().size());
  // Untouched nodes must remain plain INodeFile/INodeDirectory instances.
  final INodeDirectory noChangeDirNode = (INodeDirectory) fsdir.getINode(noChangeDir.toString());
  assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
  final INodeFile noChangeFileNode = (INodeFile) fsdir.getINode(noChangeFile.toString());
  assertEquals(INodeFile.class, noChangeFileNode.getClass());
  TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1, fsdir, blockmanager);
  // Metadata changes made after s0 must still be visible in the current tree.
  FileStatus status = hdfs.getFileStatus(metaChangeDir);
  assertEquals("unknown", status.getOwner());
  assertEquals("unknown", status.getGroup());
  status = hdfs.getFileStatus(metaChangeFile);
  assertEquals(REPLICATION_1, status.getReplication());
  TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1, fsdir, blockmanager);
  // The deleted file must be gone from both the current tree and the
  // removed snapshot's path.
  try {
    status = hdfs.getFileStatus(toDeleteFile);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(), e);
  }
  final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir, "s0", toDeleteFile.toString().substring(dir.toString().length()));
  try {
    status = hdfs.getFileStatus(toDeleteFileInSnapshot);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(), e);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting a directory which is a descendant of a snapshottable
 * directory. In the test we need to cover the following cases:
 * <pre>
 * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
 * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
 *    on ancestor(s).
 * 3. Delete current INodeFileWithSnapshot.
 * 4. Delete current INodeDirectoryWithSnapshot.
 * </pre>
 */
@Test(timeout=300000)
public void testDeleteCurrentFileDirectory() throws Exception {
  // Case 1: deleteDir is removed before any snapshot is taken.
  Path deleteDir = new Path(subsub, "deleteDir");
  Path deleteFile = new Path(deleteDir, "deleteFile");
  Path noChangeDirParent = new Path(sub, "noChangeDirParent");
  Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
  Path noChangeFile = new Path(noChangeDir, "noChangeFile");
  DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
  Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
  DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
  Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
  DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
  hdfs.delete(deleteDir, true);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  // Case 2: tempDir is created and deleted entirely after s0, so its
  // blocks must be released immediately.
  Path tempDir = new Path(dir, "tempdir");
  Path tempFile = new Path(tempDir, "tempfile");
  DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(), 1, fsdir, blockmanager);
  BlockInfo[] blocks = temp.getBlocks();
  hdfs.delete(tempDir, true);
  checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 3);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }
  // Changes between s0 and s1: new file plus replication changes.
  Path newFileAfterS0 = new Path(subsub, "newFile");
  DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
  hdfs.setReplication(metaChangeFile1, REPLICATION_1);
  hdfs.setReplication(metaChangeFile2, REPLICATION_1);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
  Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
  Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
  // Cases 3/4: delete a subtree that is covered by snapshots; the
  // snapshot copies must remain reachable through s1's path.
  hdfs.delete(noChangeDirParent, true);
  checkQuotaUsageComputation(dir, 17L, BLOCKSIZE * REPLICATION * 4);
  Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName() + "/" + noChangeDirParent.getName() + "/" + noChangeDir.getName());
  INodeDirectory snapshotNode = (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
  assertEquals(INodeDirectory.class, snapshotNode.getClass());
  ReadOnlyList children = snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(2, children.size());
  INode noChangeFileSCopy = children.get(1);
  assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
  assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
  TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
  // The snapshot copy of metaChangeFile2 must report the replication that
  // was in effect at each snapshot.
  INodeFile metaChangeFile2SCopy = children.get(0).asFile();
  assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
  assertTrue(metaChangeFile2SCopy.isWithSnapshot());
  assertFalse(metaChangeFile2SCopy.isUnderConstruction());
  TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
  assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
  assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
  assertEquals(REPLICATION, metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
  // A file created after the last snapshot and then deleted with its
  // ancestor must have its blocks released.
  Path newFile = new Path(sub, "newFile");
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(), 1, fsdir, blockmanager);
  blocks = newFileNode.getBlocks();
  checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
  hdfs.delete(sub, true);
  checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }
  // Verify the structure preserved under snapshot s1 after deleting sub.
  Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName());
  INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString()).asDirectory();
  assertTrue(snapshotNode4Sub.isWithSnapshot());
  assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
  assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
  INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
  assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
  assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
  INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
  children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(2, children.size());
  assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
  assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
  // s0 only saw metaChangeFile1 (newFileAfterS0 was created after s0).
  children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
  assertEquals(1, children.size());
  INode child = children.get(0);
  assertEquals(child.getLocalName(), metaChangeFile1.getName());
  INodeFile metaChangeFile1SCopy = child.asFile();
  assertTrue(metaChangeFile1SCopy.isWithSnapshot());
  assertFalse(metaChangeFile1SCopy.isUnderConstruction());
  assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
  assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
  assertEquals(REPLICATION, metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A test covering the case where the snapshot diff to be deleted is renamed
 * to its previous snapshot.
 */
@Test(timeout=300000)
public void testRenameSnapshotDiff() throws Exception {
  // Nested snapshots are needed: sub is snapshottable inside dir.
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  final Path subFile0 = new Path(sub, "file0");
  final Path subsubFile0 = new Path(subsub, "file0");
  DFSTestUtil.createFile(hdfs, subFile0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subsubFile0, BLOCKSIZE, REPLICATION, seed);
  hdfs.setOwner(subsub, "owner", "group");
  SnapshotTestHelper.createSnapshot(hdfs, sub, "s0");
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 6);
  final Path subFile1 = new Path(sub, "file1");
  final Path subsubFile1 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subFile1, BLOCKSIZE, REPLICATION_1, seed);
  DFSTestUtil.createFile(hdfs, subsubFile1, BLOCKSIZE, REPLICATION, seed);
  checkQuotaUsageComputation(sub, 8, BLOCKSIZE * 11);
  // s1 on sub, then s2 on the outer dir.
  SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
  checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
  checkQuotaUsageComputation(dir, 11, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);
  // Mutations after s2: owner change, replication change, file deletion.
  hdfs.setOwner(subsub, "unknown", "unknown");
  hdfs.setReplication(subsubFile1, REPLICATION_1);
  checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 11, BLOCKSIZE * 11);
  hdfs.delete(subFile1, true);
  checkQuotaUsageComputation(new Path("/"), 16, BLOCKSIZE * 11);
  checkQuotaUsageComputation(dir, 15, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 13, BLOCKSIZE * 11);
  // Before deleting s2, its paths must show the pre-mutation state.
  Path subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subsub.getName());
  Path subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
  Path subFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subFile1.getName());
  FileStatus subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
  assertEquals("owner", subsubStatus.getOwner());
  assertEquals("group", subsubStatus.getGroup());
  FileStatus subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
  assertEquals(REPLICATION, subsubFile1Status.getReplication());
  FileStatus subFile1Status = hdfs.getFileStatus(subFile1SCopy);
  assertEquals(REPLICATION_1, subFile1Status.getReplication());
  // Delete s2: its diff should fold into the prior snapshot (s1 of sub),
  // and the s2 paths must stop resolving.
  hdfs.deleteSnapshot(dir, "s2");
  checkQuotaUsageComputation(new Path("/"), 14, BLOCKSIZE * 11);
  checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 12, BLOCKSIZE * 11);
  try {
    hdfs.getFileStatus(subsubSnapshotCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + subsubSnapshotCopy.toString(), e);
  }
  try {
    hdfs.getFileStatus(subsubFile1SCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + subsubFile1SCopy.toString(), e);
  }
  try {
    hdfs.getFileStatus(subFile1SCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + subFile1SCopy.toString(), e);
  }
  // The same state must now be reachable through sub's snapshot s1.
  subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subsub.getName());
  subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
  subFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subFile1.getName());
  subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
  assertEquals("owner", subsubStatus.getOwner());
  assertEquals("group", subsubStatus.getGroup());
  subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
  assertEquals(REPLICATION, subsubFile1Status.getReplication());
  subFile1Status = hdfs.getFileStatus(subFile1SCopy);
  assertEquals(REPLICATION_1, subFile1Status.getReplication());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotDiffReport

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test the computation and representation of diff between snapshots.
 */
@Test(timeout=60000)
public void testDiffReport() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  Path subsub1 = new Path(sub1, "subsub1");
  Path subsubsub1 = new Path(subsub1, "subsubsub1");
  hdfs.mkdirs(subsubsub1);
  modifyAndCreateSnapshot(sub1, new Path[]{sub1, subsubsub1});
  modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1, subsubsub1});
  // A diff report on a non-snapshottable dir must fail.
  try {
    hdfs.getSnapshotDiffReport(subsub1, "s1", "s2");
    fail("Expect exception when getting snapshot diff report: " + subsub1 + " is not a snapshottable directory.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + subsub1, e);
  }
  // An unknown snapshot name must fail.
  final String invalidName = "invalid";
  try {
    hdfs.getSnapshotDiffReport(sub1, invalidName, invalidName);
    fail("Expect exception when providing invalid snapshot name for diff report");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot find the snapshot of directory " + sub1 + " with name " + invalidName, e);
  }
  // Diffing a snapshot with itself (or "" with "") must yield no entries.
  SnapshotDiffReport report = hdfs.getSnapshotDiffReport(sub1, "s0", "s0");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(sub1, "", "");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1), "s0", "s2");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  // Expected entries for s0 -> s2 on sub1.
  verifyDiffReport(sub1, "s0", "s2", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
  // s0 -> s5 also includes the changes made under the nested
  // snapshottable dir subsub1/subsubsub1.
  verifyDiffReport(sub1, "s0", "s5", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  verifyDiffReport(sub1, "s2", "s5", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  // Empty "to" snapshot name means: diff against the current state.
  verifyDiffReport(sub1, "s3", "", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/subsubsub1/file12")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1/file10")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotListing

IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test listing snapshots under a snapshottable directory.
 */
@Test(timeout=15000)
public void testListSnapshots() throws Exception {
  final Path snapshotsPath = new Path(dir, ".snapshot");
  FileStatus[] stats = null;
  // Listing /.snapshot on the (snapshottable) root yields no entries yet.
  stats = hdfs.listStatus(new Path("/.snapshot"));
  assertEquals(0, stats.length);
  // Listing .snapshot under a non-snapshottable dir must fail.
  try {
    stats = hdfs.listStatus(snapshotsPath);
    fail("expect SnapshotException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + dir.toString(), e);
  }
  hdfs.allowSnapshot(dir);
  stats = hdfs.listStatus(snapshotsPath);
  assertEquals(0, stats.length);
  // Create snapshots one by one; the listing must grow accordingly and
  // stay ordered by name.
  final int snapshotNum = 5;
  for (int sNum = 0; sNum < snapshotNum; sNum++) {
    hdfs.createSnapshot(dir, "s_" + sNum);
    stats = hdfs.listStatus(snapshotsPath);
    assertEquals(sNum + 1, stats.length);
    for (int i = 0; i <= sNum; i++) {
      assertEquals("s_" + i, stats[i].getPath().getName());
    }
  }
  // Delete snapshots from the newest down; the listing must shrink.
  for (int sNum = snapshotNum - 1; sNum > 0; sNum--) {
    hdfs.deleteSnapshot(dir, "s_" + sNum);
    stats = hdfs.listStatus(snapshotsPath);
    assertEquals(sNum, stats.length);
    for (int i = 0; i < sNum; i++) {
      assertEquals("s_" + i, stats[i].getPath().getName());
    }
  }
  hdfs.deleteSnapshot(dir, "s_0");
  stats = hdfs.listStatus(snapshotsPath);
  assertEquals(0, stats.length);
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotManager

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that the global limit on snapshots is honored.
 */
@Test(timeout=10000)
public void testSnapshotLimits() throws Exception {
  // Mock out the directory tree and FSDirectory; only the snapshot-ID
  // accounting inside SnapshotManager is under test.
  INodeDirectory ids = mock(INodeDirectory.class);
  FSDirectory fsdir = mock(FSDirectory.class);
  SnapshotManager sm = spy(new SnapshotManager(fsdir));
  doReturn(ids).when(sm).getSnapshottableRoot(anyString());
  doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
  // Use a primitive loop counter instead of a boxed Integer to avoid the
  // repeated autoboxing the original `Integer i` incurred; the generated
  // snapshot names ("0", "1", ...) are unchanged.
  for (int i = 0; i < testMaxSnapshotLimit; ++i) {
    sm.createSnapshot("dummy", String.valueOf(i));
  }
  // The ID space is exhausted: the next creation must fail, and the error
  // text mentions "rollover".
  try {
    sm.createSnapshot("dummy", "shouldFailSnapshot");
    Assert.fail("Expected SnapshotException not thrown");
  } catch (SnapshotException se) {
    Assert.assertTrue(se.getMessage().toLowerCase().contains("rollover"));
  }
  // Snapshot IDs are never reused: even after a deletion, creating another
  // snapshot must still fail with the same rollover error.
  sm.deleteSnapshot("", "", mock(INode.BlocksMapUpdateInfo.class), new ArrayList());
  try {
    sm.createSnapshot("dummy", "shouldFailSnapshot2");
    Assert.fail("Expected SnapshotException not thrown");
  } catch (SnapshotException se) {
    Assert.assertTrue(se.getMessage().toLowerCase().contains("rollover"));
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotRename

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test FileStatus of snapshot file before/after rename.
 */
@Test(timeout=60000)
public void testSnapshotRename() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
  Path ssPath = new Path(snapshotRoot, file1.getName());
  assertTrue(hdfs.exists(ssPath));
  FileStatus statusBeforeRename = hdfs.getFileStatus(ssPath);
  // Rename the snapshot itself: the old .snapshot path must vanish and
  // the new one must resolve.
  hdfs.renameSnapshot(sub1, "s1", "s2");
  assertFalse(hdfs.exists(ssPath));
  snapshotRoot = SnapshotTestHelper.getSnapshotRoot(sub1, "s2");
  ssPath = new Path(snapshotRoot, file1.getName());
  assertTrue(hdfs.exists(ssPath));
  FileStatus statusAfterRename = hdfs.getFileStatus(ssPath);
  // Only the path component of the status may differ; after substituting
  // the new path, the statuses must be identical.
  assertFalse(statusBeforeRename.equals(statusAfterRename));
  statusBeforeRename.setPath(statusAfterRename.getPath());
  assertEquals(statusBeforeRename.toString(), statusAfterRename.toString());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotStatsMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test getting SnapshotStatsMXBean information: the JMX attributes
 * "SnapshottableDirectories" and "Snapshots" must agree with the counts
 * reported by the live SnapshotManager, and the reported paths must match
 * the directory that was snapshotted.
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  String pathName = "/snapshot";
  Path path = new Path(pathName);
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
    CompositeData[] directories = (CompositeData[]) mbs.getAttribute(
        mxbeanName, "SnapshottableDirectories");
    // FIX: the arrays are statically typed, so reflective
    // java.lang.reflect.Array.getLength()/get() calls were unnecessary;
    // use plain length/indexing instead.
    int numDirectories = directories.length;
    assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
    CompositeData[] snapshots =
        (CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
    int numSnapshots = snapshots.length;
    assertEquals(sm.getNumSnapshots(), numSnapshots);
    CompositeData d = directories[0];
    CompositeData s = snapshots[0];
    assertTrue(((String) d.get("path")).contains(pathName));
    assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshottableDirListing

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Exercises getSnapshottableDirListing() through a long sequence of
//    allowSnapshot/disallowSnapshot/rename/delete operations, checking the
//    listing's length, entry names, full paths and snapshot counts each time.
//  - An empty listing is represented as null (asserted twice), not as an
//    empty array.
//  - rename(dir3, dir2, OVERWRITE) replaces dir2 and removes it from the
//    listing until snapshotting is re-allowed; after creating s1/s2 its
//    getSnapshotNumber() is 2.
//  - The assertions rely on a stable ordering of the returned entries
//    (dir1 before dir2, sub1 before sub2) — presumably path-sorted;
//    TODO confirm against the getSnapshottableDirListing contract.
/** * Test listing all the snapshottable directories */ @Test(timeout=60000) public void testListSnapshottableDir() throws Exception { cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true); SnapshottableDirectoryStatus[] dirs=hdfs.getSnapshottableDirListing(); assertNull(dirs); final Path root=new Path("/"); hdfs.allowSnapshot(root); dirs=hdfs.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals("",dirs[0].getDirStatus().getLocalName()); assertEquals(root,dirs[0].getFullPath()); hdfs.disallowSnapshot(root); dirs=hdfs.getSnapshottableDirListing(); assertNull(dirs); hdfs.allowSnapshot(dir1); dirs=hdfs.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName()); assertEquals(dir1,dirs[0].getFullPath()); assertEquals(0,dirs[0].getSnapshotNumber()); hdfs.allowSnapshot(dir2); dirs=hdfs.getSnapshottableDirListing(); assertEquals(2,dirs.length); assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName()); assertEquals(dir1,dirs[0].getFullPath()); assertEquals(dir2.getName(),dirs[1].getDirStatus().getLocalName()); assertEquals(dir2,dirs[1].getFullPath()); assertEquals(0,dirs[1].getSnapshotNumber()); final Path dir3=new Path("/TestSnapshot3"); hdfs.mkdirs(dir3); hdfs.rename(dir3,dir2,Rename.OVERWRITE); dirs=hdfs.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals(dir1,dirs[0].getFullPath()); hdfs.allowSnapshot(dir2); hdfs.createSnapshot(dir2,"s1"); hdfs.createSnapshot(dir2,"s2"); dirs=hdfs.getSnapshottableDirListing(); assertEquals(dir2,dirs[1].getFullPath()); assertEquals(2,dirs[1].getSnapshotNumber()); Path sub1=new Path(dir1,"sub1"); Path file1=new Path(sub1,"file1"); Path sub2=new Path(dir1,"sub2"); Path file2=new Path(sub2,"file2"); DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed); DFSTestUtil.createFile(hdfs,file2,BLOCKSIZE,REPLICATION,seed); hdfs.allowSnapshot(sub1); hdfs.allowSnapshot(sub2); 
dirs=hdfs.getSnapshottableDirListing(); assertEquals(4,dirs.length); assertEquals(dir1,dirs[0].getFullPath()); assertEquals(dir2,dirs[1].getFullPath()); assertEquals(sub1,dirs[2].getFullPath()); assertEquals(sub2,dirs[3].getFullPath()); hdfs.disallowSnapshot(sub1); dirs=hdfs.getSnapshottableDirListing(); assertEquals(3,dirs.length); assertEquals(dir1,dirs[0].getFullPath()); assertEquals(dir2,dirs[1].getFullPath()); assertEquals(sub2,dirs[2].getFullPath()); hdfs.delete(dir1,true); dirs=hdfs.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals(dir2.getName(),dirs[0].getDirStatus().getLocalName()); assertEquals(dir2,dirs[0].getFullPath()); }

Class: org.apache.hadoop.hdfs.server.namenode.startupprogress.TestStartupProgress

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - 100 tasks cycle through 4 (phase, step, file, size, total) combinations,
//    so each combination is exercised by 25 threads; each increments its
//    step counter by 100, hence every expected count below is 25*100 = 2500.
//  - awaitTermination runs in the finally block BEFORE the view assertions,
//    so all submitted tasks have finished when the view is created.
//  - The raw Callable here (with "public Void call()") is presumably an
//    extraction artifact of an original Callable<Void> — verify against the
//    upstream source before reformatting.
@Test(timeout=10000) public void testThreadSafety() throws Exception { int numThreads=100; Phase[] phases={LOADING_FSIMAGE,LOADING_FSIMAGE,LOADING_EDITS,LOADING_EDITS}; Step[] steps=new Step[]{new Step(INODES),new Step(DELEGATION_KEYS),new Step(INODES),new Step(DELEGATION_KEYS)}; String[] files={"file1","file1","file2","file2"}; long[] sizes={1000L,1000L,2000L,2000L}; long[] totals={10000L,20000L,30000L,40000L}; ExecutorService exec=Executors.newFixedThreadPool(numThreads); try { for (int i=0; i < numThreads; ++i) { final Phase phase=phases[i % phases.length]; final Step step=steps[i % steps.length]; final String file=files[i % files.length]; final long size=sizes[i % sizes.length]; final long total=totals[i % totals.length]; exec.submit(new Callable(){ @Override public Void call(){ startupProgress.beginPhase(phase); startupProgress.setFile(phase,file); startupProgress.setSize(phase,size); startupProgress.setTotal(phase,step,total); incrementCounter(startupProgress,phase,step,100L); startupProgress.endStep(phase,step); startupProgress.endPhase(phase); return null; } } ); } } finally { exec.shutdown(); assertTrue(exec.awaitTermination(10000L,TimeUnit.MILLISECONDS)); } StartupProgressView view=startupProgress.createView(); assertNotNull(view); assertEquals("file1",view.getFile(LOADING_FSIMAGE)); assertEquals(1000L,view.getSize(LOADING_FSIMAGE)); assertEquals(10000L,view.getTotal(LOADING_FSIMAGE,new Step(INODES))); assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(INODES))); assertEquals(20000L,view.getTotal(LOADING_FSIMAGE,new Step(DELEGATION_KEYS))); assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(DELEGATION_KEYS))); assertEquals("file2",view.getFile(LOADING_EDITS)); assertEquals(2000L,view.getSize(LOADING_EDITS)); assertEquals(30000L,view.getTotal(LOADING_EDITS,new Step(INODES))); assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(INODES))); assertEquals(40000L,view.getTotal(LOADING_EDITS,new Step(DELEGATION_KEYS))); 
assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(DELEGATION_KEYS))); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies step counters: counts accumulate per (phase, step), a step that
 * never ran reports zero, and counter values captured in a view do not
 * change when the underlying progress is incremented afterwards — only a
 * freshly created view sees the new totals.
 */
@Test(timeout=10000)
public void testCounter() {
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step fsImageInodes = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, fsImageInodes);
  incrementCounter(startupProgress, LOADING_FSIMAGE, fsImageInodes, 100L);
  startupProgress.endStep(LOADING_FSIMAGE, fsImageInodes);
  Step fsImageKeys = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, fsImageKeys);
  incrementCounter(startupProgress, LOADING_FSIMAGE, fsImageKeys, 200L);
  startupProgress.endStep(LOADING_FSIMAGE, fsImageKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  Step editsFile = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, editsFile);
  incrementCounter(startupProgress, LOADING_EDITS, editsFile, 5000L);
  StartupProgressView snapshot = startupProgress.createView();
  assertNotNull(snapshot);
  assertEquals(100L, snapshot.getCount(LOADING_FSIMAGE, fsImageInodes));
  assertEquals(200L, snapshot.getCount(LOADING_FSIMAGE, fsImageKeys));
  assertEquals(5000L, snapshot.getCount(LOADING_EDITS, editsFile));
  // SAVING_CHECKPOINT never began, so its step count is zero.
  assertEquals(0L, snapshot.getCount(SAVING_CHECKPOINT, new Step(INODES)));
  incrementCounter(startupProgress, LOADING_EDITS, editsFile, 1000L);
  startupProgress.endStep(LOADING_EDITS, editsFile);
  startupProgress.endPhase(LOADING_EDITS);
  // The earlier view must not observe the extra 1000 increments...
  assertEquals(5000L, snapshot.getCount(LOADING_EDITS, editsFile));
  // ...but a new view must.
  snapshot = startupProgress.createView();
  assertNotNull(snapshot);
  assertEquals(6000L, snapshot.getCount(LOADING_EDITS, editsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Steps registered with a phase in random order must be returned by the
 * view in their canonical order, regardless of registration order.
 */
@Test(timeout=10000)
public void testStepSequence() {
  Step[] expectedSteps = new Step[] {
    new Step(INODES, "file1"), new Step(DELEGATION_KEYS, "file1"),
    new Step(INODES, "file2"), new Step(DELEGATION_KEYS, "file2"),
    new Step(INODES, "file3"), new Step(DELEGATION_KEYS, "file3")
  };
  // Register the same steps in a randomized order.
  List<Step> randomizedSteps = new ArrayList<Step>(Arrays.asList(expectedSteps));
  Collections.shuffle(randomizedSteps);
  startupProgress.beginPhase(SAVING_CHECKPOINT);
  for (Step step : randomizedSteps) {
    startupProgress.beginStep(SAVING_CHECKPOINT, step);
  }
  // The view must hand the steps back in the expected sequence.
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  List<Step> observedSteps = new ArrayList<Step>(expectedSteps.length);
  for (Step step : view.getSteps(SAVING_CHECKPOINT)) {
    observedSteps.add(step);
  }
  assertArrayEquals(expectedSteps, observedSteps.toArray());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests elapsed-time reporting: timers for the overall startup and for any
 * still-running phase/step keep advancing, while a completed phase and its
 * steps are frozen at their final elapsed values.
 */
@Test(timeout=10000)
public void testElapsedTime() throws Exception {
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
  Thread.sleep(50L); // ensure a measurable, non-zero elapsed time
  startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
  Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  Thread.sleep(50L);
  startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS, loadingEditsFile, 10000L);
  incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 5000L);
  Thread.sleep(50L); // LOADING_EDITS is still running at this point
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertTrue(view.getElapsedTime() > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE,
      loadingFsImageDelegationKeys) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS, loadingEditsFile) > 0);
  // Phases that never started report zero elapsed time.
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT) == 0);
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT, new Step(INODES)) == 0);
  long totalTime = view.getElapsedTime();
  long loadingFsImageTime = view.getElapsedTime(LOADING_FSIMAGE);
  long loadingFsImageInodesTime =
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes);
  // BUG FIX: this previously queried loadingFsImageInodes again (copy-paste),
  // so the delegation-keys elapsed time was never captured or verified.
  long loadingFsImageDelegationKeysTime =
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  long loadingEditsTime = view.getElapsedTime(LOADING_EDITS);
  long loadingEditsFileTime =
      view.getElapsedTime(LOADING_EDITS, loadingEditsFile);
  Thread.sleep(50L);
  // The overall timer keeps advancing...
  assertTrue(totalTime < view.getElapsedTime());
  // ...while the completed LOADING_FSIMAGE phase and its steps stay frozen...
  assertEquals(loadingFsImageTime, view.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(loadingFsImageInodesTime,
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes));
  assertEquals(loadingFsImageDelegationKeysTime,
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageDelegationKeys));
  // ...and the still-running LOADING_EDITS phase/step keep advancing.
  assertTrue(loadingEditsTime < view.getElapsedTime(LOADING_EDITS));
  assertTrue(loadingEditsFileTime <
      view.getElapsedTime(LOADING_EDITS, loadingEditsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Per-step percentages match count/total: 100/1000 = 0.10 for fsimage
//    inodes, 200/800 = 0.25 for delegation keys, 5000/10000 = 0.5 for the
//    edits file; phases that never began report 0.0.
//  - The overall figure of 0.167 presumably reflects how phases are
//    aggregated/weighted internally — TODO confirm against the
//    StartupProgressView percent-complete implementation.
//  - After ending both phases, their percentages clamp to 1.0, but the
//    overall value is 0.5 because SAVING_CHECKPOINT (and any remaining
//    phases) are still pending.
@Test(timeout=10000) public void testPercentComplete(){ startupProgress.beginPhase(LOADING_FSIMAGE); Step loadingFsImageInodes=new Step(INODES); startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes); startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageInodes,1000L); incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageInodes,100L); Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS); startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys); startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageDelegationKeys,800L); incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageDelegationKeys,200L); startupProgress.beginPhase(LOADING_EDITS); Step loadingEditsFile=new Step("file",1000L); startupProgress.beginStep(LOADING_EDITS,loadingEditsFile); startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L); incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L); StartupProgressView view=startupProgress.createView(); assertNotNull(view); assertEquals(0.167f,view.getPercentComplete(),0.001f); assertEquals(0.167f,view.getPercentComplete(LOADING_FSIMAGE),0.001f); assertEquals(0.10f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f); assertEquals(0.25f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f); assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS),0.001f); assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f); assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f); assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f); startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes); startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys); startupProgress.endPhase(LOADING_FSIMAGE); startupProgress.endStep(LOADING_EDITS,loadingEditsFile); startupProgress.endPhase(LOADING_EDITS); view=startupProgress.createView(); assertNotNull(view); 
assertEquals(0.5f,view.getPercentComplete(),0.001f); assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE),0.001f); assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f); assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f); assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS),0.001f); assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f); assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f); assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f); }

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A view created before any phase begins must report zeroed/empty state for
 * every phase (no elapsed time, no file, no size, no steps, PENDING status)
 * and must enumerate the phases in Phase enum declaration order.
 */
@Test(timeout=10000)
public void testInitialState() {
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertEquals(0L, view.getElapsedTime());
  assertEquals(0.0f, view.getPercentComplete(), 0.001f);
  List<Phase> seenPhases = new ArrayList<Phase>();
  for (Phase phase : view.getPhases()) {
    seenPhases.add(phase);
    assertEquals(0L, view.getElapsedTime(phase));
    assertNull(view.getFile(phase));
    assertEquals(0.0f, view.getPercentComplete(phase), 0.001f);
    assertEquals(Long.MIN_VALUE, view.getSize(phase));
    assertEquals(PENDING, view.getStatus(phase));
    assertEquals(0L, view.getTotal(phase));
    // No steps may exist yet; any iteration at all is a failure.
    for (Step step : view.getSteps(phase)) {
      fail(String.format("unexpected step %s in phase %s at initial state",
          step, phase));
    }
  }
  assertArrayEquals(EnumSet.allOf(Phase.class).toArray(),
      seenPhases.toArray());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Runs LOADING_FSIMAGE to completion, then force-completes every other
//    phase, captures a "before" view, and afterwards attempts a second round
//    of mutations (new file/size/total, extra counter increments, a brand-new
//    LOADING_EDITS step).
//  - Once all phases are COMPLETE the progress object is frozen: the "after"
//    view must match the "before" view field-for-field, and the step added to
//    LOADING_EDITS after completion must not appear at all
//    (getSteps(LOADING_EDITS) has no elements).
@Test(timeout=10000) public void testFrozenAfterStartupCompletes(){ startupProgress.beginPhase(LOADING_FSIMAGE); startupProgress.setFile(LOADING_FSIMAGE,"file1"); startupProgress.setSize(LOADING_FSIMAGE,1000L); Step step=new Step(INODES); startupProgress.beginStep(LOADING_FSIMAGE,step); startupProgress.setTotal(LOADING_FSIMAGE,step,10000L); incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L); startupProgress.endStep(LOADING_FSIMAGE,step); startupProgress.endPhase(LOADING_FSIMAGE); for ( Phase phase : EnumSet.allOf(Phase.class)) { if (startupProgress.getStatus(phase) != Status.COMPLETE) { startupProgress.beginPhase(phase); startupProgress.endPhase(phase); } } StartupProgressView before=startupProgress.createView(); startupProgress.beginPhase(LOADING_FSIMAGE); startupProgress.setFile(LOADING_FSIMAGE,"file2"); startupProgress.setSize(LOADING_FSIMAGE,2000L); startupProgress.beginStep(LOADING_FSIMAGE,step); startupProgress.setTotal(LOADING_FSIMAGE,step,20000L); incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L); startupProgress.endStep(LOADING_FSIMAGE,step); startupProgress.endPhase(LOADING_FSIMAGE); startupProgress.beginPhase(LOADING_EDITS); Step newStep=new Step("file1"); startupProgress.beginStep(LOADING_EDITS,newStep); incrementCounter(startupProgress,LOADING_EDITS,newStep,100L); startupProgress.endStep(LOADING_EDITS,newStep); startupProgress.endPhase(LOADING_EDITS); StartupProgressView after=startupProgress.createView(); assertEquals(before.getCount(LOADING_FSIMAGE),after.getCount(LOADING_FSIMAGE)); assertEquals(before.getCount(LOADING_FSIMAGE,step),after.getCount(LOADING_FSIMAGE,step)); assertEquals(before.getElapsedTime(),after.getElapsedTime()); assertEquals(before.getElapsedTime(LOADING_FSIMAGE),after.getElapsedTime(LOADING_FSIMAGE)); assertEquals(before.getElapsedTime(LOADING_FSIMAGE,step),after.getElapsedTime(LOADING_FSIMAGE,step)); assertEquals(before.getFile(LOADING_FSIMAGE),after.getFile(LOADING_FSIMAGE)); 
assertEquals(before.getSize(LOADING_FSIMAGE),after.getSize(LOADING_FSIMAGE)); assertEquals(before.getTotal(LOADING_FSIMAGE),after.getTotal(LOADING_FSIMAGE)); assertEquals(before.getTotal(LOADING_FSIMAGE,step),after.getTotal(LOADING_FSIMAGE,step)); assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Totals set via setTotal() on each (phase, step) pair must be reported
 * unchanged by a view created after both phases have completed.
 */
@Test(timeout=10000)
public void testTotal() {
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step fsImageInodes = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, fsImageInodes);
  startupProgress.setTotal(LOADING_FSIMAGE, fsImageInodes, 1000L);
  startupProgress.endStep(LOADING_FSIMAGE, fsImageInodes);
  Step fsImageKeys = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, fsImageKeys);
  startupProgress.setTotal(LOADING_FSIMAGE, fsImageKeys, 800L);
  startupProgress.endStep(LOADING_FSIMAGE, fsImageKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  Step editsFile = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, editsFile);
  startupProgress.setTotal(LOADING_EDITS, editsFile, 10000L);
  startupProgress.endStep(LOADING_EDITS, editsFile);
  startupProgress.endPhase(LOADING_EDITS);
  StartupProgressView totalsView = startupProgress.createView();
  assertNotNull(totalsView);
  assertEquals(1000L, totalsView.getTotal(LOADING_FSIMAGE, fsImageInodes));
  assertEquals(800L, totalsView.getTotal(LOADING_FSIMAGE, fsImageKeys));
  assertEquals(10000L, totalsView.getTotal(LOADING_EDITS, editsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Phase status reporting: a begun-and-ended phase is COMPLETE, a begun
 * phase is RUNNING, and a never-begun phase is PENDING.
 */
@Test(timeout=10000)
public void testStatus() {
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  StartupProgressView statusView = startupProgress.createView();
  assertNotNull(statusView);
  assertEquals(COMPLETE, statusView.getStatus(LOADING_FSIMAGE)); // begun + ended
  assertEquals(RUNNING, statusView.getStatus(LOADING_EDITS));    // begun only
  assertEquals(PENDING, statusView.getStatus(SAVING_CHECKPOINT)); // never begun
}

Class: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Tests eviction from the ShortCircuitCache. The cache is constructed so
 * that only two unreferenced replicas fit; after inserting three and
 * unref'ing them all, re-fetching blocks 1 and 2 must hit the cache (the
 * creator must never run), while block 0 must have been evicted (the
 * creator must run and its null result is surfaced).
 */
@Test(timeout=60000)
public void testEviction() throws Exception {
  final ShortCircuitCache cache =
      new ShortCircuitCache(2, 10000000, 1, 10000000, 1, 10000, 0);
  final TestFileDescriptorPair pairs[] = new TestFileDescriptorPair[] {
    new TestFileDescriptorPair(),
    new TestFileDescriptorPair(),
    new TestFileDescriptorPair()
  };
  ShortCircuitReplicaInfo replicaInfos[] = new ShortCircuitReplicaInfo[] {
    null, null, null
  };
  // Insert three replicas and validate their data/meta streams.
  for (int i = 0; i < pairs.length; i++) {
    replicaInfos[i] = cache.fetchOrCreate(
        new ExtendedBlockId(i, "test_bp1"),
        new SimpleReplicaCreator(i, cache, pairs[i]));
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(
        replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
        replicaInfos[i].getReplica().getMetaStream());
  }
  // Release all references so the replicas become evictable.
  for (int i = 0; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  // Blocks 1 and 2 must still be cached: the creator must not be invoked.
  for (int i = 1; i < pairs.length; i++) {
    // FIX: capture with a plain effectively-final int rather than the
    // deprecated boxing constructor new Integer(i).
    final int iVal = i;
    replicaInfos[i] = cache.fetchOrCreate(
        new ExtendedBlockId(i, "test_bp1"),
        new ShortCircuitReplicaCreator() {
          @Override
          public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Assert.fail("expected to use existing entry for " + iVal);
            return null;
          }
        });
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(
        replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
        replicaInfos[i].getReplica().getMetaStream());
  }
  // Block 0 was evicted: the creator must run, and its null result is
  // passed straight through to the caller.
  final MutableBoolean calledCreate = new MutableBoolean(false);
  replicaInfos[0] = cache.fetchOrCreate(
      new ExtendedBlockId(0, "test_bp1"),
      new ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
          calledCreate.setValue(true);
          return null;
        }
      });
  Preconditions.checkState(replicaInfos[0].getReplica() == null);
  Assert.assertTrue(calledCreate.isTrue());
  // Clean up: release the cached replicas and close the descriptor pairs.
  for (int i = 1; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  for (int i = 0; i < pairs.length; i++) {
    pairs[i].close();
  }
  cache.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Reads one byte of a short-circuit file so a replica with a shared-memory
//    slot exists in the client cache, then verifies via CacheVisitor that the
//    slot is valid.
//  - After shutting down the sole DataNode, the same visitor must observe the
//    slot as invalid — i.e. staleness is communicated through the shm slot,
//    not through a read failure.
//  - NOTE(review): sockDir is never closed here (unlike sibling tests);
//    presumably cleaned up elsewhere — TODO confirm.
@Test(timeout=60000) public void testShmBasedStaleness() throws Exception { BlockReaderTestUtil.enableShortCircuitShmTracing(); TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testShmBasedStaleness",sockDir); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache(); String TEST_FILE="/test_file"; final int TEST_FILE_LEN=8193; final int SEED=0xFADED; DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); FSDataInputStream fis=fs.open(new Path(TEST_FILE)); int first=fis.read(); final ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,new Path(TEST_FILE)); Assert.assertTrue(first != -1); cache.accept(new CacheVisitor(){ @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){ ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block)); Assert.assertNotNull(replica); Assert.assertTrue(replica.getSlot().isValid()); } } ); cluster.getDataNodes().get(0).shutdown(); cache.accept(new CacheVisitor(){ @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){ ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block)); Assert.assertNotNull(replica); Assert.assertFalse(replica.getSlot().isValid()); } } ); cluster.shutdown(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - The streams-cache expiry is set very high so the replica stays in the
//    client's file-descriptor cache after the read completes.
//  - First visitor: shm manager starts empty; after reading the file, the
//    datanode has exactly one not-full, connected DfsClientShm.
//  - After deleting the file, GenericTestUtils.waitFor polls until every slot
//    in that shm has been invalidated — the DataNode signals staleness of the
//    unlinked file's replica through the ShortCircuitShm slots.
//  - The outer `done` MutableBoolean is reset to true at the top of each
//    get() call, then cleared if any valid slot remains.
/** * Test unlinking a file whose blocks we are caching in the DFSClient. * The DataNode will notify the DFSClient that the replica is stale via the * ShortCircuitShm. */ @Test(timeout=60000) public void testUnlinkingReplicasInFileDescriptorCache() throws Exception { BlockReaderTestUtil.enableShortCircuitShmTracing(); TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testUnlinkingReplicasInFileDescriptorCache",sockDir); conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,1000000000L); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache(); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertEquals(0,info.size()); } } ); final Path TEST_PATH=new Path("/test_file"); final int TEST_FILE_LEN=8193; final int SEED=0xFADE0; DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LEN,(short)1,SEED); byte contents[]=DFSTestUtil.readFileBuffer(fs,TEST_PATH); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertTrue(info.get(datanode).full.isEmpty()); Assert.assertFalse(info.get(datanode).disabled); Assert.assertEquals(1,info.get(datanode).notFull.values().size()); DfsClientShm shm=info.get(datanode).notFull.values().iterator().next(); Assert.assertFalse(shm.isDisconnected()); } } ); fs.delete(TEST_PATH,false); GenericTestUtils.waitFor(new Supplier(){ MutableBoolean done=new MutableBoolean(true); @Override public Boolean get(){ try { done.setValue(true); 
cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertTrue(info.get(datanode).full.isEmpty()); Assert.assertFalse(info.get(datanode).disabled); Assert.assertEquals(1,info.get(datanode).notFull.values().size()); DfsClientShm shm=info.get(datanode).notFull.values().iterator().next(); for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) { Slot slot=iter.next(); if (slot.isValid()) { done.setValue(false); } } } } ); } catch ( IOException e) { LOG.error("error running visitor",e); } return done.booleanValue(); } } ,10,60000); cluster.shutdown(); sockDir.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Starts with an empty DfsClientShmManager, allocates one shm slot over a
//    domain socket peer, and verifies: the slot is non-null, the peer was
//    consumed (usedPeer set), and the manager now tracks exactly one
//    not-full, non-disabled shm for that datanode.
//  - After scheduleSlotReleaser, waitFor polls until both the full and
//    notFull shm collections for the datanode are empty — i.e. releasing the
//    only slot eventually disposes of the whole shared-memory segment.
@Test(timeout=60000) public void testAllocShm() throws Exception { BlockReaderTestUtil.enableShortCircuitShmTracing(); TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testAllocShm",sockDir); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache(); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertEquals(0,info.size()); } } ); DomainPeer peer=getDomainPeerToDn(conf); MutableBoolean usedPeer=new MutableBoolean(false); ExtendedBlockId blockId=new ExtendedBlockId(123,"xyz"); final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); Slot slot=cache.allocShmSlot(datanode,peer,usedPeer,blockId,"testAllocShm_client"); Assert.assertNotNull(slot); Assert.assertTrue(usedPeer.booleanValue()); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertEquals(1,info.size()); PerDatanodeVisitorInfo vinfo=info.get(datanode); Assert.assertFalse(vinfo.disabled); Assert.assertEquals(0,vinfo.full.size()); Assert.assertEquals(1,vinfo.notFull.size()); } } ); cache.scheduleSlotReleaser(slot); GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ final MutableBoolean done=new MutableBoolean(false); try { cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { done.setValue(info.get(datanode).full.isEmpty() && info.get(datanode).notFull.isEmpty()); } } ); } catch ( IOException e) { LOG.error("error running visitor",e); } return done.booleanValue(); } } ,10,60000); cluster.shutdown(); sockDir.close(); }

Class: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitLocalRead

UtilityVerifier BooleanVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Writes a small file, resolves its first block and datanode, then calls
//    getBlockLocalPathInfo over the ClientDatanodeProtocol proxy directly.
//  - The call must be rejected with an IOException whose message contains
//    "not allowed to call getBlockLocalPathInfo", because the test user is
//    not in the configured allow-list for this deprecated RPC.
//  - fs.close() and cluster.shutdown() run in finally so the mini-cluster is
//    torn down even when the assertions fail.
@Test(timeout=10000) public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException { final Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); try { DFSTestUtil.createFile(fs,new Path("/tmp/x"),16,(short)1,23); LocatedBlocks lb=cluster.getNameNode().getRpcServer().getBlockLocations("/tmp/x",0,16); ExtendedBlock blk=new ExtendedBlock(lb.get(0).getBlock()); Token token=lb.get(0).getBlockToken(); final DatanodeInfo dnInfo=lb.get(0).getLocations()[0]; ClientDatanodeProtocol proxy=DFSUtil.createClientDatanodeProtocolProxy(dnInfo,conf,60000,false); try { proxy.getBlockLocalPathInfo(blk,token); Assert.fail("The call should have failed as this user " + " is not allowed to call getBlockLocalPathInfo"); } catch ( IOException ex) { Assert.assertTrue(ex.getMessage().contains("not allowed to call getBlockLocalPathInfo")); } } finally { fs.close(); cluster.shutdown(); } }

TestInitializer AssumptionSetter HybridVerifier 
/**
 * Skip every test in this class unless the native DomainSocket support
 * loaded successfully (getLoadingFailureReason() returns null on success).
 */
@Before
public void before() {
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}

Class: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitShm

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - Fills a 4096-byte ShortCircuitShm with slots until isFull(), then checks
//    that the slot iterator yields exactly the allocated slots and that slot
//    indices are assigned sequentially (0, 1, 2, ...).
//  - Anchor protocol: addAnchor() fails until makeAnchorable() is called,
//    then succeeds; removeAnchor() undoes it so the slots can be
//    unregistered and invalidated before the shm is freed.
//  - Cleanup (shm.free(), stream.close(), fullyDelete) is not in a finally
//    block, so a failed assertion leaks the temp dir — acceptable for a test,
//    but worth noting.
@Test(timeout=60000) public void testAllocateSlots() throws Exception { File path=new File(TEST_BASE,"testAllocateSlots"); path.mkdirs(); SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("shm_",new String[]{path.getAbsolutePath()}); FileInputStream stream=factory.createDescriptor("testAllocateSlots",4096); ShortCircuitShm shm=new ShortCircuitShm(ShmId.createRandom(),stream); int numSlots=0; ArrayList slots=new ArrayList(); while (!shm.isFull()) { Slot slot=shm.allocAndRegisterSlot(new ExtendedBlockId(123L,"test_bp1")); slots.add(slot); numSlots++; } LOG.info("allocated " + numSlots + " slots before running out."); int slotIdx=0; for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) { Assert.assertTrue(slots.contains(iter.next())); } for ( Slot slot : slots) { Assert.assertFalse(slot.addAnchor()); Assert.assertEquals(slotIdx++,slot.getSlotIdx()); } for ( Slot slot : slots) { slot.makeAnchorable(); } for ( Slot slot : slots) { Assert.assertTrue(slot.addAnchor()); } for ( Slot slot : slots) { slot.removeAnchor(); } for ( Slot slot : slots) { shm.unregisterSlot(slot.getSlotIdx()); slot.makeInvalid(); } shm.free(); stream.close(); FileUtil.fullyDelete(path); }

TestInitializer AssumptionSetter HybridVerifier 
/**
 * Skip every test in this class unless the native shared-file-descriptor
 * support loaded successfully (getLoadingFailureReason() is null on success).
 */
@Before
public void before() {
  Assume.assumeTrue(
      SharedFileDescriptorFactory.getLoadingFailureReason() == null);
}

Class: org.apache.hadoop.hdfs.tools.TestDFSHAAdmin

BooleanVerifier EqualityVerifier HybridVerifier 
// Review notes (documentation only; code unchanged):
//  - With DFS_HA_AUTO_FAILOVER_ENABLED_KEY set, plain -transitionToActive /
//    -transitionToStandby must return -1, print "Refusing to manually
//    manage", and never reach the protocol (verified with Mockito.never()).
//  - With -forcemanual plus a confirmation fed through System.in, both
//    transitions succeed exactly once, and every captured request must carry
//    RequestSource.REQUEST_BY_USER_FORCED.
/** * Test that, if automatic HA is enabled, none of the mutative operations * will succeed, unless the -forcemanual flag is specified. * @throws Exception */ @Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf=getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,getFencerTrueCommand()); tool.setConf(conf); assertEquals(-1,runTool("-transitionToActive","nn1")); assertTrue(errOutput.contains("Refusing to manually manage")); assertEquals(-1,runTool("-transitionToStandby","nn1")); assertTrue(errOutput.contains("Refusing to manually manage")); Mockito.verify(mockProtocol,Mockito.never()).transitionToActive(anyReqInfo()); Mockito.verify(mockProtocol,Mockito.never()).transitionToStandby(anyReqInfo()); setupConfirmationOnSystemIn(); assertEquals(0,runTool("-transitionToActive","-forcemanual","nn1")); setupConfirmationOnSystemIn(); assertEquals(0,runTool("-transitionToStandby","-forcemanual","nn1")); Mockito.verify(mockProtocol,Mockito.times(1)).transitionToActive(reqInfoCaptor.capture()); Mockito.verify(mockProtocol,Mockito.times(1)).transitionToStandby(reqInfoCaptor.capture()); for ( StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) { assertEquals(RequestSource.REQUEST_BY_USER_FORCED,ri.getSource()); } }

Class: org.apache.hadoop.hdfs.tools.TestDFSHAAdminMiniCluster

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise -transitionToActive / -transitionToStandby against both
 * NameNodes, checking the resulting HA state after each transition.
 */
@Test
public void testStateTransition() throws Exception {
  for (int i = 0; i < 2; i++) {
    NameNode namenode = cluster.getNameNode(i);
    String id = "nn" + (i + 1);
    // Every NameNode starts out in standby.
    assertTrue(namenode.isStandbyState());
    assertEquals(0, runTool("-transitionToActive", id));
    assertFalse(namenode.isStandbyState());
    assertEquals(0, runTool("-transitionToStandby", id));
    assertTrue(namenode.isStandbyState());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A failover attempt must be rejected when the target NameNode is in safe
 * mode, with a descriptive error message.
 */
@Test
public void testTryFailoverToSafeMode() throws Exception {
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
      TestDFSHAAdmin.getFencerTrueCommand());
  tool.setConf(conf);
  NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
  assertEquals(-1, runTool("-failover", "nn2", "nn1"));
  String expectedMsg = "is not ready to become active: "
      + "The NameNode is in safemode";
  assertTrue("Bad output: " + errOutput, errOutput.contains(expectedMsg));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test failover with various options
 */
@Test
public void testFencer() throws Exception {
  // Failover with no fencer configured must fail.
  assertEquals(-1,runTool("-failover","nn1","nn2"));
  // Configure a shell fencer that records its target info in a tmp file.
  File tmpFile=File.createTempFile("testFencer",".txt");
  tmpFile.deleteOnExit();
  if (Shell.WINDOWS) {
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo %target_nameserviceid%.%target_namenodeid% " + "%target_port% %dfs_ha_namenode_id% > " + tmpFile.getAbsolutePath() + ")");
  } else {
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo -n $target_nameserviceid.$target_namenodeid " + "$target_port $dfs_ha_namenode_id > " + tmpFile.getAbsolutePath() + ")");
  }
  tool.setConf(conf);
  // Failover with a fencer configured, with and without -ns.
  assertEquals(0,runTool("-transitionToActive","nn1"));
  assertEquals(0,runTool("-failover","nn1","nn2"));
  assertEquals(0,runTool("-ns","minidfs-ns","-failover","nn2","nn1"));
  // None of the above were forced, so the fencer has not run yet.
  assertEquals("",Files.toString(tmpFile,Charsets.UTF_8));
  // Failover with --forcefence runs the fencer against the old active.
  assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
  String fenceCommandOutput=Files.toString(tmpFile,Charsets.UTF_8).replaceAll(" *[\r\n]+","");
  assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",fenceCommandOutput);
  tmpFile.delete();
  // --forceactive alone does not fence.
  assertEquals(0,runTool("-failover","nn2","nn1","--forceactive"));
  assertFalse(tmpFile.exists());
  // --forcefence with no fencer configured must fail without fencing.
  conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
  tool.setConf(conf);
  assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
  assertFalse(tmpFile.exists());
  // --forcefence with an unparseable fencer must also fail.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"foobar!");
  tool.setConf(conf);
  assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
  assertFalse(tmpFile.exists());
  // --forcefence listed before the namenode arguments is accepted.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0,runTool("-failover","--forcefence","nn1","nn2"));
}

Class: org.apache.hadoop.hdfs.tools.TestDelegationTokenFetcher

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Call fetch token using http server: fetch a delegation token from a
 * (mocked) WebHDFS filesystem, save it to a credentials file, verify the
 * stored token round-trips, and check renew/cancel reach the fake renewer.
 */
@Test
public void expectedTokenIsRetrievedFromHttp() throws Exception {
  final Token testToken = new Token("id".getBytes(), "pwd".getBytes(),
      FakeRenewer.KIND, new Text("127.0.0.1:1234"));
  WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
  doReturn(testToken).when(fs).getDelegationToken(anyString());
  Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
  DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
  Credentials creds = Credentials.readTokenStorageFile(p, conf);
  // The iterator type had been mangled to "Iterator>" (not valid Java);
  // restored to a well-formed wildcard token iterator.
  Iterator<Token<?>> itr = creds.getAllTokens().iterator();
  assertTrue("token not exist error", itr.hasNext());
  Token fetchedToken = itr.next();
  Assert.assertArrayEquals("token wrong identifier error",
      testToken.getIdentifier(), fetchedToken.getIdentifier());
  Assert.assertArrayEquals("token wrong password error",
      testToken.getPassword(), fetchedToken.getPassword());
  DelegationTokenFetcher.renewTokens(conf, p);
  Assert.assertEquals(testToken, FakeRenewer.getLastRenewed());
  DelegationTokenFetcher.cancelTokens(conf, p);
  Assert.assertEquals(testToken, FakeRenewer.getLastCanceled());
}

Class: org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Truncate a valid edits file mid-record and verify that the offline edits
 * viewer fails without recovery mode but succeeds with it, and that the
 * recovered output round-trips through XML and binary.
 */
@Test
public void testRecoveryMode() throws IOException {
  String edits = nnHelper.generateEdits();
  // try-with-resources guarantees the stream is closed even if an
  // assertion fails part-way through (the original leaked on failure).
  try (FileOutputStream os = new FileOutputStream(edits, true)) {
    FileChannel editsFile = os.getChannel();
    // Chop off the last few bytes to corrupt the final record.
    editsFile.truncate(editsFile.size() - 5);
    String editsParsedXml = folder.newFile("editsRecoveredParsed.xml").getAbsolutePath();
    String editsReparsed = folder.newFile("editsRecoveredReparsed").getAbsolutePath();
    String editsParsedXml2 = folder.newFile("editsRecoveredParsed2.xml").getAbsolutePath();
    // Without recovery mode the corrupt tail must cause a failure.
    assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
    // Recovery mode should skip the bad tail and succeed.
    assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
    assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
    assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
    assertTrue("Test round trip",
        filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2));
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the OfflineEditsViewer: generate edits, convert binary to XML and
 * back, and verify the round trip reproduces the original file (modulo
 * trailing zeros).
 */
@Test
public void testGenerated() throws IOException {
  String edits=nnHelper.generateEdits();
  LOG.info("Generated edits=" + edits);
  String editsParsedXml=folder.newFile("editsParsed.xml").getAbsolutePath();
  String editsReparsed=folder.newFile("editsParsed").getAbsolutePath();
  // binary -> XML, then XML -> binary.
  assertEquals(0,runOev(edits,editsParsedXml,"xml",false));
  assertEquals(0,runOev(editsParsedXml,editsReparsed,"binary",false));
  // The generated edits must exercise every op code.
  assertTrue("Edits " + edits + " should have all op codes",hasAllOpCodes(edits));
  LOG.info("Comparing generated file " + editsReparsed + " with reference file "+ edits);
  assertTrue("Generated edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(edits,editsReparsed));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify the checked-in reference edits file: binary to XML to binary round
 * trip, op code coverage, and agreement with the reference XML.
 */
@Test
public void testStored() throws IOException {
  final String cacheDir=System.getProperty("test.cache.data","build/test/cache");
  String editsStored=cacheDir + "/editsStored";
  String editsStoredParsedXml=cacheDir + "/editsStoredParsed.xml";
  String editsStoredReparsed=cacheDir + "/editsStoredReparsed";
  String editsStoredXml=cacheDir + "/editsStored.xml";
  assertEquals(0,runOev(editsStored,editsStoredParsedXml,"xml",false));
  assertEquals(0,runOev(editsStoredParsedXml,editsStoredReparsed,"binary",false));
  assertTrue("Edits " + editsStored + " should have all op codes",hasAllOpCodes(editsStored));
  // Compare produced XML against the reference XML, ignoring EOL style.
  assertTrue("Reference XML edits and parsed to XML should be same",FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),new File(editsStoredParsedXml),"UTF-8"));
  assertTrue("Reference edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(editsStored,editsStoredReparsed));
}

Class: org.apache.hadoop.hdfs.tools.offlineImageViewer.TestOfflineImageViewer

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Run the FileDistributionCalculator over the saved fsimage and check the
 * reported totals (files, directories, max file size) against the files
 * written during setup.
 */
@Test
public void testFileDistributionCalculator() throws IOException {
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  new FileDistributionCalculator(new Configuration(), 0, 0, o)
      .visit(new RandomAccessFile(originalFsimage, "r"));
  o.close();
  Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles = Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
  p = Pattern.compile("totalDirectories = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs = Integer.parseInt(matcher.group(1));
  // The +3 accounts for directories created outside the per-test dirs —
  // TODO confirm against the fixture setup.
  assertEquals(NUM_DIRS + 3, totalDirs);
  // Typed comparator (the raw-typed anonymous class with an @Override
  // compare(FileStatus, FileStatus) did not compile), using Long.compare.
  FileStatus maxFile = Collections.max(writtenFiles.values(),
      new Comparator<FileStatus>() {
        @Override
        public int compare(FileStatus first, FileStatus second) {
          return Long.compare(first.getLen(), second.getLen());
        }
      });
  p = Pattern.compile("maxFileSize = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}

Class: org.apache.hadoop.hdfs.util.TestAtomicFileOutputStream

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test case where the destination file already exists: the old contents
 * must stay untouched until close(), which atomically replaces them.
 * (The previous javadoc was copy-pasted from testWriteNewFile.)
 */
@Test
public void testOverwriteFile() throws IOException {
  assertTrue("Creating empty dst file",DST_FILE.createNewFile());
  OutputStream fos=new AtomicFileOutputStream(DST_FILE);
  assertTrue("Empty file still exists",DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  // Original (empty) contents are still visible before close().
  assertEquals("",DFSTestUtil.readFile(DST_FILE));
  fos.close();
  // After close() the new data replaces the old file.
  String readBackData=DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING,readBackData);
}

TestInitializer BooleanVerifier HybridVerifier 
/** Ensure the test directory exists and is empty before each test. */
@Before
public void cleanupTestDir() throws IOException {
  boolean dirReady = TEST_DIR.exists() || TEST_DIR.mkdirs();
  assertTrue(dirReady);
  FileUtil.fullyDeleteContents(TEST_DIR);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test case where there is no existing file: the destination must not
 * appear until the stream is closed.
 */
@Test
public void testWriteNewFile() throws IOException {
  OutputStream fos=new AtomicFileOutputStream(DST_FILE);
  assertFalse(DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  // Still not visible after flush; only close() publishes the file.
  assertFalse(DST_FILE.exists());
  fos.close();
  assertTrue(DST_FILE.exists());
  String readBackData=DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING,readBackData);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test case where the flush() fails at close time - make sure
 * that we clean up after ourselves and don't touch any
 * existing file at the destination
 */
@Test
public void testFailToFlush() throws IOException {
  // Seed the destination with known contents.
  FileOutputStream fos=new FileOutputStream(DST_FILE);
  fos.write(TEST_STRING_2.getBytes());
  fos.close();
  OutputStream failingStream=createFailingStream();
  failingStream.write(TEST_STRING.getBytes());
  try {
    failingStream.close();
    fail("Close didn't throw exception");
  } catch ( IOException ioe) {
    // expected: the injected flush failure surfaces on close
  }
  // The pre-existing destination contents must be untouched.
  assertEquals(TEST_STRING_2,DFSTestUtil.readFile(DST_FILE));
  // Only the destination file may remain; the temp file must be gone.
  assertEquals("Temporary file should have been cleaned up",DST_FILE.getName(),Joiner.on(",").join(TEST_DIR.list()));
}

Class: org.apache.hadoop.hdfs.util.TestBestEffortLongFile

BranchVerifier TestInitializer BooleanVerifier HybridVerifier 
/**
 * Remove any long file left over from a previous test and make sure its
 * parent directory exists.
 */
@Before
public void cleanup() {
  boolean leftover = FILE.exists();
  if (leftover) {
    assertTrue(FILE.delete());
  }
  FILE.getParentFile().mkdirs();
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An empty (zero-length) backing file must be treated like a missing one:
 * get() falls back to the default value.
 */
@Test
public void testTruncatedFileReturnsDefault() throws IOException {
  assertTrue(FILE.createNewFile());
  assertEquals(0, FILE.length());
  BestEffortLongFile longFile = new BestEffortLongFile(FILE, 12345L);
  try {
    assertEquals(12345L, longFile.get());
  } finally {
    longFile.close();
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Repeatedly set random values and verify each reads back, both from the
 * live instance and from a fresh reader opened on the same file (the fresh
 * reader's own default must be ignored once a value is persisted).
 */
@Test
public void testGetSet() throws IOException {
  BestEffortLongFile f=new BestEffortLongFile(FILE,12345L);
  try {
    // No stored value yet: get() returns the default; after get() the
    // backing file exists on disk.
    assertEquals(12345L,f.get());
    assertTrue(FILE.exists());
    Random r=new Random();
    for (int i=0; i < 100; i++) {
      long newVal=r.nextLong();
      f.set(newVal);
      assertEquals(newVal,f.get());
      // A second reader sees the persisted value, not its own default.
      BestEffortLongFile f2=new BestEffortLongFile(FILE,999L);
      try {
        assertEquals(newVal,f2.get());
      } finally {
        IOUtils.closeStream(f2);
      }
    }
  } finally {
    IOUtils.closeStream(f);
  }
}

Class: org.apache.hadoop.hdfs.util.TestChunkedArrayList

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Sanity-check ChunkedArrayList: emptiness, size after bulk adds, chunk
 * count growth, and the maximum chunk size cap.
 */
@Test
public void testBasics() {
  final int N_ELEMS=100000;
  ChunkedArrayList l=new ChunkedArrayList();
  assertTrue(l.isEmpty());
  for (int i=0; i < N_ELEMS; i++) {
    l.add(i);
  }
  assertFalse(l.isEmpty());
  assertEquals(N_ELEMS,l.size());
  // 100k elements must have spilled into many chunks.
  assertTrue(l.getNumChunks() > 10);
  assertEquals(8192,l.getMaxChunkSize());
}

Class: org.apache.hadoop.hdfs.util.TestExactSizeInputStream

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A bulk read past the real data (but within the advertised size) must
 * throw EOFException once the underlying bytes are exhausted.
 */
@Test
public void testReadArrayNotEnough() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  byte[] buffer = new byte[10];
  // Only two real bytes are available even though 5 were promised.
  assertEquals(2, in.read(buffer, 0, 5));
  try {
    in.read(buffer, 2, 3);
    fail("Read buf when should be out of data");
  } catch (EOFException e) {
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Skipping past the real data (but within the advertised size) must raise
 * EOFException on the next skip.
 */
@Test
public void testSkipNotEnough() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  // Only two bytes can actually be skipped.
  assertEquals(2, in.skip(3));
  try {
    in.skip(1);
    fail("Skip when should be out of data");
  } catch (EOFException e) {
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** mark/reset is unsupported: markSupported() is false and mark() throws. */
@Test
public void testMark() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  assertFalse(in.markSupported());
  try {
    in.mark(1);
    fail("Mark should not succeed");
  } catch (UnsupportedOperationException expected) {
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Single-byte reads past the real data (but within the advertised size)
 * must raise EOFException.
 */
@Test
public void testReadNotEnough() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  assertEquals(2, in.available());
  assertEquals((int) 'h', in.read());
  assertEquals((int) 'e', in.read());
  try {
    in.read();
    fail("Read when should be out of data");
  } catch (EOFException e) {
  }
}

Class: org.apache.hadoop.hdfs.util.TestLightWeightHashSet

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * pollN: polling zero elements is a no-op, polling a few removes exactly
 * those, and over-polling drains whatever remains.
 */
@Test
public void testPollNMulti() {
  LOG.info("Test pollN multi");
  set.addAll(list);
  // Poll zero elements: nothing changes.
  List poll=set.pollN(0);
  assertEquals(0,poll.size());
  for ( Integer i : list) {
    assertTrue(set.contains(i));
  }
  // Poll 10: each polled element has left the set.
  poll=set.pollN(10);
  assertEquals(10,poll.size());
  for ( Integer i : poll) {
    assertTrue(list.contains(i));
    assertFalse(set.contains(i));
  }
  // Poll more than remain: the rest is drained.
  poll=set.pollN(1000);
  assertEquals(NUM - 10,poll.size());
  for ( Integer i : poll) {
    assertTrue(list.contains(i));
  }
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  LOG.info("Test pollN multi - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Add then remove a single element; the set must remain fully usable
 * (add and iterate) afterwards.
 */
@Test
public void testRemoveOne() {
  LOG.info("Test remove one");
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  assertTrue(set.remove(list.get(0)));
  assertEquals(0,set.size());
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  // Re-adding after removal must work.
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  iter=set.iterator();
  assertTrue(iter.hasNext());
  LOG.info("Test remove one - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Bulk operations: addAll/removeAll, containsAll after a partial removal,
 * and both toArray variants over the remaining elements.
 */
@Test
public void testOther() {
  LOG.info("Test other");
  // Remove everything that was just added.
  assertTrue(set.addAll(list));
  assertTrue(set.removeAll(list));
  assertTrue(set.isEmpty());
  // Remove only the first 10 elements.
  List sub=new LinkedList();
  for (int i=0; i < 10; i++) {
    sub.add(list.get(i));
  }
  assertTrue(set.addAll(list));
  assertTrue(set.removeAll(sub));
  assertFalse(set.isEmpty());
  assertEquals(NUM - 10,set.size());
  for ( Integer i : sub) {
    assertFalse(set.contains(i));
  }
  assertFalse(set.containsAll(sub));
  // The untouched tail is still fully contained.
  List sub2=new LinkedList();
  for (int i=10; i < NUM; i++) {
    sub2.add(list.get(i));
  }
  assertTrue(set.containsAll(sub2));
  // Both toArray variants must reflect exactly the remaining elements.
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM - 10,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(sub2.contains(array[i]));
  }
  assertEquals(NUM - 10,set.size());
  Object[] array2=set.toArray();
  assertEquals(NUM - 10,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(sub2.contains(array2[i]));
  }
  LOG.info("Test other - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * pollToArray: a small array drains that many elements, an exact-size
 * array drains everything, and a zero-length array polls nothing.
 */
@Test
public void testPollNMultiArray() {
  LOG.info("Test pollN multi array");
  set.addAll(list);
  // Drain 10 elements into a 10-slot array.
  Integer[] poll=new Integer[10];
  poll=set.pollToArray(poll);
  assertEquals(10,poll.length);
  for ( Integer i : poll) {
    assertTrue(list.contains(i));
    assertFalse(set.contains(i));
  }
  // Drain the rest with an oversized array.
  poll=new Integer[NUM];
  poll=set.pollToArray(poll);
  assertEquals(NUM - 10,poll.length);
  for (int i=0; i < NUM - 10; i++) {
    assertTrue(list.contains(poll[i]));
  }
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  // Refill and drain everything in a single call.
  set.addAll(list);
  poll=new Integer[NUM];
  poll=set.pollToArray(poll);
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  assertEquals(NUM,poll.length);
  for (int i=0; i < NUM; i++) {
    assertTrue(list.contains(poll[i]));
  }
  // A zero-length array polls nothing and leaves the set intact.
  set.addAll(list);
  poll=new Integer[0];
  poll=set.pollToArray(poll);
  for (int i=0; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  assertEquals(0,poll.length);
  LOG.info("Test pollN multi array- DONE");
}

InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier 
/**
 * getElement must return the instance actually stored in the set (checked
 * by identity), even when looked up via a distinct-but-equal key, and null
 * for elements not in the set.
 */
@Test
public void testGetElement() {
  LightWeightHashSet objSet = new LightWeightHashSet();
  TestObject stored = new TestObject("object A");
  TestObject equalKey = new TestObject("object A");
  TestObject other = new TestObject("object B");
  objSet.add(stored);
  objSet.add(other);
  assertSame(stored, objSet.getElement(stored));
  // Lookup via an equal (but not identical) key yields the stored instance.
  assertSame(stored, objSet.getElement(equalKey));
  assertSame(other, objSet.getElement(other));
  assertNull(objSet.getElement(new TestObject("not in set")));
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * pollAll drains the entire set: the set ends empty and every element
 * appears in the returned list.
 */
@Test
public void testPollAll() {
  LOG.info("Test poll all");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  List poll=set.pollAll();
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  for (int i=0; i < NUM; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for ( Integer i : poll) {
    assertTrue(list.contains(i));
  }
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  LOG.info("Test poll all - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A freshly constructed set is empty: zero size and an empty iterator. */
@Test
public void testEmptyBasic() {
  LOG.info("Test empty basic");
  assertTrue(set.isEmpty());
  assertEquals(0, set.size());
  Iterator none = set.iterator();
  assertFalse(none.hasNext());
  LOG.info("Test empty - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A single-element set: size 1, not empty, and the iterator yields exactly
 * that one element.
 */
@Test
public void testOneElementBasic() {
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1,set.size());
  assertFalse(set.isEmpty());
  Iterator iter=set.iterator();
  assertTrue(iter.hasNext());
  assertEquals(list.get(0),iter.next());
  assertFalse(iter.hasNext());
  LOG.info("Test one element basic - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** clear() empties a populated set: zero size, empty iterator. */
@Test
public void testClear() {
  LOG.info("Test clear");
  set.addAll(list);
  assertEquals(NUM,set.size());
  assertFalse(set.isEmpty());
  set.clear();
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  LOG.info("Test clear - DONE");
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Many elements: every add succeeds once, duplicate adds fail, and the
 * iterator visits exactly list.size() non-null members.
 */
@Test
public void testMultiBasic() {
  LOG.info("Test multi element basic");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  assertEquals(list.size(),set.size());
  for ( Integer i : list) {
    assertTrue(set.contains(i));
  }
  // Duplicate adds are rejected without disturbing membership.
  for ( Integer i : list) {
    assertFalse(set.add(i));
  }
  for ( Integer i : list) {
    assertTrue(set.contains(i));
  }
  Iterator iter=set.iterator();
  int num=0;
  while (iter.hasNext()) {
    Integer next=iter.next();
    assertNotNull(next);
    assertTrue(list.contains(next));
    num++;
  }
  assertEquals(list.size(),num);
  LOG.info("Test multi element basic - DONE");
}

Class: org.apache.hadoop.hdfs.util.TestLightWeightLinkedSet

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * pollFirst must return elements in insertion order; after draining half,
 * the remainder iterates in order, and re-added elements queue at the end.
 */
@Test
public void testPollMulti() {
  LOG.info("Test poll multi");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  // Poll the first half, in insertion order.
  for (int i=0; i < NUM / 2; i++) {
    assertEquals(list.get(i),set.pollFirst());
  }
  assertEquals(NUM / 2,set.size());
  for (int i=0; i < NUM / 2; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for (int i=NUM / 2; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  // Iteration resumes from the first un-polled element, in order.
  Iterator iter=set.iterator();
  int num=NUM / 2;
  while (iter.hasNext()) {
    assertEquals(list.get(num++),iter.next());
  }
  assertEquals(num,NUM);
  // Re-add the first half; they go to the back of the ordering.
  for (int i=0; i < NUM / 2; i++) {
    assertTrue(set.add(list.get(i)));
  }
  assertEquals(NUM,set.size());
  for (int i=NUM / 2; i < NUM; i++) {
    assertEquals(list.get(i),set.pollFirst());
  }
  for (int i=0; i < NUM / 2; i++) {
    assertEquals(list.get(i),set.pollFirst());
  }
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  LOG.info("Test poll multi - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The bookmark of an empty set has no next; after the first adds it points
 * at the head and iterates in insertion order.
 */
@Test(timeout=60000)
public void testBookmarkSetToHeadOnAddToEmpty() {
  LOG.info("Test bookmark is set after adding to previously empty set.");
  Iterator it=set.getBookmark();
  assertFalse(it.hasNext());
  set.add(list.get(0));
  set.add(list.get(1));
  // Bookmark now starts at the head and covers both elements.
  it=set.getBookmark();
  assertTrue(it.hasNext());
  assertEquals(it.next(),list.get(0));
  assertEquals(it.next(),list.get(1));
  assertFalse(it.hasNext());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getBookmark returns an iterator that resumes where the previous bookmark
 * iterator stopped, rather than restarting at the head.
 */
@Test(timeout=60000)
public void testGetBookmarkReturnsBookmarkIterator() {
  LOG.info("Test getBookmark returns proper iterator");
  assertTrue(set.addAll(list));
  Iterator bookmark=set.getBookmark();
  assertEquals(bookmark.next(),list.get(0));
  // Advance the bookmark partway through the set.
  final int numAdvance=list.size() / 2;
  for (int i=1; i < numAdvance; i++) {
    bookmark.next();
  }
  // A fresh bookmark picks up at the next un-visited element.
  Iterator bookmark2=set.getBookmark();
  assertEquals(bookmark2.next(),list.get(numAdvance));
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Remove the first half of the elements; the rest must still be present
 * and iterate in insertion order.
 */
@Test
public void testRemoveMulti() {
  LOG.info("Test remove multi");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  for (int i=0; i < NUM / 2; i++) {
    assertTrue(set.remove(list.get(i)));
  }
  for (int i=0; i < NUM / 2; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for (int i=NUM / 2; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  // Remaining elements keep their insertion order.
  Iterator iter=set.iterator();
  int num=NUM / 2;
  while (iter.hasNext()) {
    assertEquals(list.get(num++),iter.next());
  }
  assertEquals(num,NUM);
  LOG.info("Test remove multi - DONE");
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * An empty linked set: empty iterator, zero size, and every poll variant
 * returns nothing.
 */
@Test
public void testEmptyBasic() {
  LOG.info("Test empty basic");
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  // All poll flavors must come back null/empty on an empty set.
  assertNull(set.pollFirst());
  assertEquals(0,set.pollAll().size());
  assertEquals(0,set.pollN(10).size());
  LOG.info("Test empty - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Many elements: adds succeed once, duplicates fail, and iteration follows
 * insertion order.
 */
@Test
public void testMultiBasic() {
  LOG.info("Test multi element basic");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  assertEquals(list.size(),set.size());
  for ( Integer i : list) {
    assertTrue(set.contains(i));
  }
  // Duplicate adds are rejected.
  for ( Integer i : list) {
    assertFalse(set.add(i));
  }
  for ( Integer i : list) {
    assertTrue(set.contains(i));
  }
  // The linked set iterates in insertion order.
  Iterator iter=set.iterator();
  int num=0;
  while (iter.hasNext()) {
    assertEquals(list.get(num++),iter.next());
  }
  assertEquals(list.size(),num);
  LOG.info("Test multi element basic - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Removing the element a bookmark rests on must advance the bookmark to
 * the next surviving element.
 */
@Test(timeout=60000)
public void testBookmarkAdvancesOnRemoveOfSameElement() {
  LOG.info("Test that the bookmark advances if we remove its element.");
  assertTrue(set.add(list.get(0)));
  assertTrue(set.add(list.get(1)));
  assertTrue(set.add(list.get(2)));
  Iterator it=set.getBookmark();
  assertEquals(it.next(),list.get(0));
  // The bookmark now rests on element 1; removing it should skip to 2.
  set.remove(list.get(1));
  it=set.getBookmark();
  assertEquals(it.next(),list.get(2));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Poll the single element; a second pollFirst on the now-empty set must
 * yield null.
 */
@Test
public void testPollOneElement() {
  LOG.info("Test poll one element");
  set.add(list.get(0));
  assertEquals(list.get(0),set.pollFirst());
  assertNull(set.pollFirst());
  LOG.info("Test poll one element - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Drain the set via repeated pollFirst and verify it ends completely
 * empty.
 */
@Test
public void testPollAll() {
  LOG.info("Test poll all");
  for ( Integer i : list) {
    assertTrue(set.add(i));
  }
  // Drain everything; pollFirst returns null once the set is empty.
  while (set.pollFirst() != null) ;
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  for (int i=0; i < NUM; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  LOG.info("Test poll all - DONE");
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Add then remove one element; polls on the emptied set return nothing and
 * the set remains usable for further adds.
 */
@Test
public void testRemoveOne() {
  LOG.info("Test remove one");
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  assertTrue(set.remove(list.get(0)));
  assertEquals(0,set.size());
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  // Every poll variant on the emptied set returns nothing.
  assertNull(set.pollFirst());
  assertEquals(0,set.pollAll().size());
  assertEquals(0,set.pollN(10).size());
  // Re-adding after removal must work.
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  iter=set.iterator();
  assertTrue(iter.hasNext());
  LOG.info("Test remove one - DONE");
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * clear() empties the set, resets the bookmark, and leaves every poll
 * variant returning nothing.
 */
@Test
public void testClear() {
  LOG.info("Test clear");
  set.addAll(list);
  assertEquals(NUM,set.size());
  assertFalse(set.isEmpty());
  // Advance the bookmark partway before clearing.
  Iterator bkmrkIt=set.getBookmark();
  for (int i=0; i < set.size() / 2 + 1; i++) {
    bkmrkIt.next();
  }
  assertTrue(bkmrkIt.hasNext());
  set.clear();
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  // The bookmark must have been reset by clear().
  bkmrkIt=set.getBookmark();
  assertFalse(bkmrkIt.hasNext());
  assertEquals(0,set.pollAll().size());
  assertEquals(0,set.pollN(10).size());
  assertNull(set.pollFirst());
  Iterator iter=set.iterator();
  assertFalse(iter.hasNext());
  LOG.info("Test clear - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Both toArray variants must reflect all elements after a bulk add. */
@Test
public void testOther() {
  LOG.info("Test other");
  assertTrue(set.addAll(list));
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(list.contains(array[i]));
  }
  // toArray must not have modified the set.
  assertEquals(NUM,set.size());
  Object[] array2=set.toArray();
  assertEquals(NUM,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(list.contains(array2[i]));
  }
  LOG.info("Test capacity - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A single-element linked set: size 1, not empty, and the iterator yields
 * exactly that one element.
 */
@Test
public void testOneElementBasic() {
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1,set.size());
  assertFalse(set.isEmpty());
  Iterator iter=set.iterator();
  assertTrue(iter.hasNext());
  assertEquals(list.get(0),iter.next());
  assertFalse(iter.hasNext());
  LOG.info("Test one element basic - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * pollN on the linked set returns elements in insertion order;
 * over-polling drains the remainder, still in order.
 */
@Test
public void testPollNMulti() {
  LOG.info("Test pollN multi");
  set.addAll(list);
  // The first 10 come back in insertion order.
  List l=set.pollN(10);
  assertEquals(10,l.size());
  for (int i=0; i < 10; i++) {
    assertEquals(list.get(i),l.get(i));
  }
  // Asking for more than remain drains the rest, still ordered.
  l=set.pollN(1000);
  assertEquals(NUM - 10,l.size());
  for (int i=10; i < NUM; i++) {
    assertEquals(list.get(i),l.get(i - 10));
  }
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  LOG.info("Test pollN multi - DONE");
}

Class: org.apache.hadoop.hdfs.util.TestMD5FileUtils

TestInitializer BooleanVerifier HybridVerifier 
/**
 * Recreate an empty test directory and seed TEST_FILE with the reference
 * bytes before each test.
 */
@Before
public void setup() throws IOException {
  FileUtil.fullyDelete(TEST_DIR);
  assertTrue(TEST_DIR.mkdirs());
  // try-with-resources: the original leaked the stream if write() threw.
  try (FileOutputStream fos = new FileOutputStream(TEST_FILE)) {
    fos.write(TEST_DATA);
  }
}

Class: org.apache.hadoop.hdfs.web.TestByteRangeInputStream

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise seek/read over ByteRangeInputStream: the initial opener is used
 * for offset 0, the resolved-URL opener for later offsets, reconnects only
 * happen on seeks that break sequential reading, and a missing
 * Content-Length header surfaces as an IOException.
 */
@Test
public void testByteRange() throws IOException {
  ByteRangeInputStream.URLOpener oMock=getMockURLOpener(new URL("http://test"));
  ByteRangeInputStream.URLOpener rMock=getMockURLOpener(null);
  ByteRangeInputStream bris=new ByteRangeInputStreamImpl(oMock,rMock);
  bris.seek(0);
  assertEquals("getPos wrong",0,bris.getPos());
  bris.read();
  assertEquals("Initial call made incorrectly (offset check)",0,bris.startPos);
  assertEquals("getPos should return 1 after reading one byte",1,bris.getPos());
  verify(oMock,times(1)).connect(0,false);
  // A sequential read must not open a new connection.
  bris.read();
  assertEquals("getPos should return 2 after reading two bytes",2,bris.getPos());
  verify(oMock,times(1)).connect(0,false);
  // Once a resolved URL is set, seeks connect through the resolved opener.
  rMock.setURL(new URL("http://resolvedurl/"));
  bris.seek(100);
  bris.read();
  assertEquals("Seek to 100 bytes made incorrectly (offset Check)",100,bris.startPos);
  assertEquals("getPos should return 101 after reading one byte",101,bris.getPos());
  verify(rMock,times(1)).connect(100,true);
  // Seeking to the current position must not reconnect.
  bris.seek(101);
  bris.read();
  verify(rMock,times(1)).connect(100,true);
  verify(rMock,times(0)).connect(101,true);
  bris.seek(2500);
  bris.read();
  assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",2500,bris.startPos);
  // A response without Content-Length must fail the read.
  doReturn(getMockConnection(null)).when(rMock).connect(anyLong(),anyBoolean());
  bris.seek(500);
  try {
    bris.read();
    fail("Exception should be thrown when content-length is not given");
  } catch ( IOException e) {
    assertTrue("Incorrect response message: " + e.getMessage(),e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH + " is missing: "));
  }
  bris.close();
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Track how often the stream is opened and closed through a sequence of
 * getInputStream/seek/close calls: seeks force a reopen, repeated gets do
 * not, close propagates to the wrapped stream exactly once, and reads
 * after close fail.
 */
@Test
public void testPropagatedClose() throws IOException {
  ByteRangeInputStream bris=mock(ByteRangeInputStream.class,CALLS_REAL_METHODS);
  InputStream mockStream=mock(InputStream.class);
  doReturn(mockStream).when(bris).openInputStream();
  Whitebox.setInternalState(bris,"status",ByteRangeInputStream.StreamStatus.SEEK);
  // Expected invocation counts, incremented as the scenario proceeds.
  int brisOpens=0;
  int brisCloses=0;
  int isCloses=0;
  // The first get opens the stream.
  bris.getInputStream();
  verify(bris,times(++brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
  // A second get without a seek reuses the stream.
  bris.getInputStream();
  verify(bris,times(brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
  // A seek forces a reopen and closes the old underlying stream.
  bris.seek(1);
  bris.getInputStream();
  verify(bris,times(++brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(++isCloses)).close();
  bris.getInputStream();
  verify(bris,times(brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
  // Seeking to the current position does not reopen (counts unchanged).
  bris.seek(1);
  bris.getInputStream();
  verify(bris,times(brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
  // close() propagates to the wrapped stream.
  bris.close();
  verify(bris,times(++brisCloses)).close();
  verify(mockStream,times(++isCloses)).close();
  // A second close does not close the wrapped stream again.
  bris.close();
  verify(bris,times(++brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
  // Reading a closed stream must fail.
  boolean errored=false;
  try {
    bris.getInputStream();
  } catch ( IOException e) {
    errored=true;
    assertEquals("Stream closed",e.getMessage());
  } finally {
    assertTrue("Read a closed steam",errored);
  }
  verify(bris,times(brisOpens)).openInputStream();
  verify(bris,times(brisCloses)).close();
  verify(mockStream,times(isCloses)).close();
}

Class: org.apache.hadoop.hdfs.web.TestFSMainOperationsWebHdfs

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * concat() must append the source files to the target and remove them; the
 * target's length becomes the sum of all four 1024-byte files.
 */
@Test
public void testConcat() throws Exception {
  Path[] paths={new Path("/test/hadoop/file1"),new Path("/test/hadoop/file2"),new Path("/test/hadoop/file3")};
  DFSTestUtil.createFile(fSys,paths[0],1024,(short)3,0);
  DFSTestUtil.createFile(fSys,paths[1],1024,(short)3,0);
  DFSTestUtil.createFile(fSys,paths[2],1024,(short)3,0);
  Path catPath=new Path("/test/hadoop/catFile");
  DFSTestUtil.createFile(fSys,catPath,1024,(short)3,0);
  Assert.assertTrue(exists(fSys,catPath));
  fSys.concat(catPath,paths);
  // The source files are consumed by the concat.
  Assert.assertFalse(exists(fSys,paths[0]));
  Assert.assertFalse(exists(fSys,paths[1]));
  Assert.assertFalse(exists(fSys,paths[2]));
  FileStatus fileStatus=fSys.getFileStatus(catPath);
  Assert.assertEquals(1024 * 4,fileStatus.getLen());
}

UtilityVerifier BooleanVerifier HybridVerifier 
// Creating a directory underneath an existing file must throw IOException;
// the empty IOException catches are intentional (the throw is the pass
// condition). The AccessControlException catches tolerate back-ends that
// reject even the exists() probe on a path below a file.
@Override @Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception { Path testDir=getTestRootPath(fSys,"test/hadoop"); Assert.assertFalse(exists(fSys,testDir)); fSys.mkdirs(testDir); Assert.assertTrue(exists(fSys,testDir)); createFile(getTestRootPath(fSys,"test/hadoop/file")); Path testSubDir=getTestRootPath(fSys,"test/hadoop/file/subdir"); try { fSys.mkdirs(testSubDir); Assert.fail("Should throw IOException."); } catch ( IOException e) { } try { Assert.assertFalse(exists(fSys,testSubDir)); } catch ( AccessControlException e) { } Path testDeepSubDir=getTestRootPath(fSys,"test/hadoop/file/deep/sub/dir"); try { fSys.mkdirs(testDeepSubDir); Assert.fail("Should throw IOException."); } catch ( IOException e) { } try { Assert.assertFalse(exists(fSys,testDeepSubDir)); } catch ( AccessControlException e) { } }

Class: org.apache.hadoop.hdfs.web.TestHttpsFileSystem

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a single byte through a swebhdfs:// file system:
 * create, write, existence check, read back, close.
 */
@Test
public void testSWebHdfsFileSystem() throws Exception {
  FileSystem swebFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
  final Path file = new Path("/testswebhdfs");

  FSDataOutputStream out = swebFs.create(file);
  out.write(23);
  out.close();

  Assert.assertTrue(swebFs.exists(file));

  InputStream in = swebFs.open(file);
  // The first byte read back must be the byte written above.
  Assert.assertEquals(23, in.read());
  in.close();
  swebFs.close();
}

Class: org.apache.hadoop.hdfs.web.TestTokenAspect

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
// Drives the background DelegationTokenRenewer: token1's renew() is stubbed
// to fail, so after sleeping two renew cycles the action becomes invalid and
// the next ensureTokenInitialized() must fetch and install token2 and queue
// a fresh (valid) renew action. Statement order matters: the sleep must
// happen between the first validity check and the re-initialization.
// NOTE(review): timing-sensitive — renewCycle=100ms; may flake on slow hosts.
@Test public void testRenewal() throws Exception { Configuration conf=new Configuration(); Token token1=mock(Token.class); Token token2=mock(Token.class); final long renewCycle=100; DelegationTokenRenewer.renewCycle=renewCycle; UserGroupInformation ugi=UserGroupInformation.createUserForTesting("foo",new String[]{"bar"}); DummyFs fs=spy(new DummyFs()); doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null); doReturn(token1).when(fs).getRenewToken(); doThrow(new IOException("renew failed")).when(token1).renew(conf); doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,null); final URI uri=new URI("dummyfs://127.0.0.1:1234"); TokenAspect tokenAspect=new TokenAspect(fs,SecurityUtil.buildTokenService(uri),DummyFs.TOKEN_KIND); fs.initialize(uri,conf); tokenAspect.initDelegationToken(ugi); tokenAspect.ensureTokenInitialized(); DelegationTokenRenewer.RenewAction action=getActionFromTokenAspect(tokenAspect); verify(fs).setDelegationToken(token1); assertTrue(action.isValid()); Thread.sleep(renewCycle * 2); assertSame(action,getActionFromTokenAspect(tokenAspect)); assertFalse(action.isValid()); tokenAspect.ensureTokenInitialized(); verify(fs,times(2)).getDelegationToken(anyString()); verify(fs).setDelegationToken(token2); assertNotSame(action,getActionFromTokenAspect(tokenAspect)); action=getActionFromTokenAspect(tokenAspect); assertTrue(action.isValid()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// When the UGI already holds a matching delegation token, the token aspect
// must adopt it via setDelegationToken without ever fetching a new one, and
// must not start a renewer or queue a renew action (both internals stay null).
@Test public void testInitWithUGIToken() throws IOException, URISyntaxException { Configuration conf=new Configuration(); DummyFs fs=spy(new DummyFs()); doReturn(null).when(fs).getDelegationToken(anyString()); Token token=new Token(new byte[0],new byte[0],DummyFs.TOKEN_KIND,new Text("127.0.0.1:1234")); fs.ugi.addToken(token); fs.ugi.addToken(new Token(new byte[0],new byte[0],new Text("Other token"),new Text("127.0.0.1:8021"))); assertEquals("wrong tokens in user",2,fs.ugi.getTokens().size()); fs.emulateSecurityEnabled=true; fs.initialize(new URI("dummyfs://127.0.0.1:1234"),conf); fs.tokenAspect.ensureTokenInitialized(); verify(fs).setDelegationToken(token); verify(fs,never()).getDelegationToken(anyString()); assertNull(Whitebox.getInternalState(fs.tokenAspect,"dtRenewer")); assertNull(Whitebox.getInternalState(fs.tokenAspect,"action")); }

Class: org.apache.hadoop.hdfs.web.TestWebHDFS

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Sets DFS_LIST_LIMIT=2 and creates listLimit*3 files so that listStatus
// must paginate over multiple server round trips; run as a plain user
// (non-superuser doAs) with a restrictive umask to exercise the permission
// path as well. Expects the full 6 entries despite the page size of 2.
@Test(timeout=300000) public void testLargeDirectory() throws Exception { final Configuration conf=WebHdfsTestUtil.createConf(); final int listLimit=2; conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,listLimit); FsPermission.setUMask(conf,new FsPermission((short)0077)); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL)); UserGroupInformation.setLoginUser(UserGroupInformation.createUserForTesting("not-superuser",new String[]{"not-supergroup"})); UserGroupInformation.createUserForTesting("me",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws IOException, URISyntaxException { FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME); Path d=new Path("/my-dir"); Assert.assertTrue(fs.mkdirs(d)); for (int i=0; i < listLimit * 3; i++) { Path p=new Path(d,"file-" + i); Assert.assertTrue(fs.createNewFile(p)); } Assert.assertEquals(listLimit * 3,fs.listStatus(d).length); return null; } } ); } finally { cluster.shutdown(); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Snapshot creation over WebHDFS: must fail on a directory that has not been
// made snapshottable, then succeed (both with an explicit name "s1" and with
// a server-generated name) after dfs.allowSnapshot(foo).
/** * Test snapshot creation through WebHdfs */ @Test public void testWebHdfsCreateSnapshot() throws Exception { MiniDFSCluster cluster=null; final Configuration conf=WebHdfsTestUtil.createConf(); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME); final Path foo=new Path("/foo"); dfs.mkdirs(foo); try { webHdfs.createSnapshot(foo); fail("Cannot create snapshot on a non-snapshottable directory"); } catch ( Exception e) { GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory",e); } dfs.allowSnapshot(foo); webHdfs.createSnapshot(foo,"s1"); final Path spath=webHdfs.createSnapshot(foo,null); Assert.assertTrue(webHdfs.exists(spath)); final Path s1path=SnapshotTestHelper.getSnapshotRoot(foo,"s1"); Assert.assertTrue(webHdfs.exists(s1path)); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsContentLength

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * GET with a redirect: neither the initial request nor the redirected
 * request should carry a Content-Length header.
 */
@Test
public void testGetOpWithRedirect() {
  Future redirectFuture = contentLengthFuture(redirectResponse);
  Future errFuture = contentLengthFuture(errResponse);
  try {
    fs.open(p).read();
    Assert.fail();
  } catch (IOException e) {
    // expected: the stub server replies with an error after redirecting
  }
  Assert.assertEquals(null, getContentLength(redirectFuture));
  Assert.assertEquals(null, getContentLength(errFuture));
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// Two-step PUT (create+write): the initial request must send an explicit
// Content-Length of 0, while the redirected data request streams with
// Transfer-Encoding "chunked". The IOException is expected from the stub
// server and intentionally ignored.
@Test public void testPutOpWithRedirect(){ Future future1=contentLengthFuture(redirectResponse); Future future2=contentLengthFuture(errResponse); try { FSDataOutputStream os=fs.create(p); os.write(new byte[]{0}); os.close(); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future1)); Assert.assertEquals("chunked",getContentLength(future2)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * DELETE carries no request body, so no Content-Length header is sent.
 */
@Test
public void testDelete() {
  Future headerFuture = contentLengthFuture(errResponse);
  try {
    fs.delete(p, false);
    Assert.fail();
  } catch (IOException e) {
    // expected: the stub server answers with an error
  }
  Assert.assertEquals(null, getContentLength(headerFuture));
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A bodyless PUT (mkdirs) must advertise an explicit Content-Length of 0.
 */
@Test
public void testPutOp() {
  Future headerFuture = contentLengthFuture(errResponse);
  try {
    fs.mkdirs(p);
    Assert.fail();
  } catch (IOException e) {
    // expected: the stub server answers with an error
  }
  Assert.assertEquals("0", getContentLength(headerFuture));
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A bodyless POST (concat) must advertise an explicit Content-Length of 0.
 */
@Test
public void testPostOp() {
  Future headerFuture = contentLengthFuture(errResponse);
  try {
    fs.concat(p, new Path[]{p});
    Assert.fail();
  } catch (IOException e) {
    // expected: the stub server answers with an error
  }
  Assert.assertEquals("0", getContentLength(headerFuture));
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// Two-step POST (append): the initial request must send Content-Length 0 and
// the redirected data request must use chunked transfer encoding. The
// IOException from the stub server is the expected outcome of the write.
@Test public void testPostOpWithRedirect(){ Future future1=contentLengthFuture(redirectResponse); Future future2=contentLengthFuture(errResponse); try { FSDataOutputStream os=fs.append(p); os.write(new byte[]{0}); os.close(); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future1)); Assert.assertEquals("chunked",getContentLength(future2)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Plain GET (getFileStatus) sends no body and therefore no Content-Length.
 */
@Test
public void testGetOp() throws Exception {
  Future headerFuture = contentLengthFuture(errResponse);
  try {
    fs.getFileStatus(p);
    Assert.fail();
  } catch (IOException e) {
    // expected: the stub server answers with an error
  }
  Assert.assertEquals(null, getContentLength(headerFuture));
}

Class: org.apache.hadoop.hdfs.web.TestWebHdfsTimeouts

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Expect a connect timeout on listFiles, because the server's connection
 * backlog has been consumed beforehand.
 */
@Test(timeout=TEST_TIMEOUT)
public void testConnectTimeout() throws Exception {
  consumeConnectionBacklog();
  try {
    fs.listFiles(new Path("/"), false);
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("connect timed out", expected.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
// Step two of a two-step write: the client follows the redirect and must hit
// a read timeout because the bogus server never replies. os is nulled on the
// success path so the finally-cleanup only closes it after a failure.
/** * On the second step of two-step write, expect read timeout accessing the * redirect location, because the bogus server never sends a reply. */ @Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteReadTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(false); OutputStream os=null; try { os=fs.create(new Path("/file")); os.close(); os=null; fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("Read timed out",e.getMessage()); } finally { IOUtils.cleanup(LOG,os); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Expect a read timeout on listFiles, because the bogus server accepts the
 * connection but never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testReadTimeout() throws Exception {
  try {
    fs.listFiles(new Path("/"), false);
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("Read timed out", expected.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * After a redirect, expect a read timeout at the redirect location,
 * because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testRedirectReadTimeout() throws Exception {
  startSingleTemporaryRedirectResponseThread(false);
  try {
    fs.getFileChecksum(new Path("/file"));
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("Read timed out", expected.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * After a redirect, expect a connect timeout at the redirect location,
 * because the connection backlog there is consumed.
 */
@Test(timeout=TEST_TIMEOUT)
public void testRedirectConnectTimeout() throws Exception {
  startSingleTemporaryRedirectResponseThread(true);
  try {
    fs.getFileChecksum(new Path("/file"));
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("connect timed out", expected.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
// Step two of a two-step write: the redirect target's backlog is consumed,
// so following the redirect must fail with a connect timeout. The stream is
// cleaned up in finally since create() itself throws here.
/** * On the second step of two-step write, expect connect timeout accessing the * redirect location, because the connection backlog is consumed. */ @Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteConnectTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(true); OutputStream os=null; try { os=fs.create(new Path("/file")); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("connect timed out",e.getMessage()); } finally { IOUtils.cleanup(LOG,os); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Expect a read timeout on an authenticated URL, because the bogus server
 * never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlReadTimeout() throws Exception {
  try {
    fs.getDelegationToken("renewer");
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("Read timed out", expected.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Expect a connect timeout on an authenticated URL, because the connection
 * backlog is consumed beforehand.
 */
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlConnectTimeout() throws Exception {
  consumeConnectionBacklog();
  try {
    fs.getDelegationToken("renewer");
    fail("expected timeout");
  } catch (SocketTimeoutException expected) {
    assertEquals("connect timed out", expected.getMessage());
  }
}

Class: org.apache.hadoop.hdfs.web.resources.TestParam

UtilityVerifier EqualityVerifier HybridVerifier 
// AclPermissionParam parsing: a valid full spec must round-trip through
// AclEntry.parseAclSpec; several well-formed specs construct silently;
// malformed specs (extra dashes, missing "::" separators, unknown entry
// types) must each throw IllegalArgumentException.
@Test public void testAclPermissionParam(){ final AclPermissionParam p=new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx"); List setAclList=AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",true); Assert.assertEquals(setAclList.toString(),p.getAclPermission(true).toString()); new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx"); try { new AclPermissionParam("user::rw--,group::rwx-,other::rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx"); try { new AclPermissionParam("user:r-,group:rwx,other:rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new AclPermissionParam("default:::r-,default:group::rwx,other::rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * ModificationTimeParam: the default maps to -1; -1 is the smallest legal
 * value and anything below it is rejected.
 */
@Test
public void testModificationTimeParam() {
  final ModificationTimeParam defaultParam =
      new ModificationTimeParam(ModificationTimeParam.DEFAULT);
  Assert.assertEquals(-1L, defaultParam.getValue().longValue());
  // -1 means "unset" and must be accepted.
  new ModificationTimeParam(-1L);
  try {
    new ModificationTimeParam(-2L);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// ReplicationParam: default yields null from getValue(), and getValue(conf)
// must fall back to dfs.replication from the configuration; replication of
// 1 is legal, 0 is rejected.
@Test public void testReplicationParam(){ final ReplicationParam p=new ReplicationParam(ReplicationParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals((short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,DFSConfigKeys.DFS_REPLICATION_DEFAULT),p.getValue(conf)); new ReplicationParam((short)1); try { new ReplicationParam((short)0); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// BufferSizeParam: default yields null from getValue(), and getValue(conf)
// must fall back to io.file.buffer.size; a size of 1 is legal, 0 is rejected.
@Test public void testBufferSizeParam(){ final BufferSizeParam p=new BufferSizeParam(BufferSizeParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals(conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),p.getValue(conf)); new BufferSizeParam(1); try { new BufferSizeParam(0); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * RecursiveParam: default is false, boolean parsing is case-insensitive,
 * and non-boolean text is rejected.
 */
@Test
public void testRecursiveParam() {
  final RecursiveParam defaultParam = new RecursiveParam(RecursiveParam.DEFAULT);
  Assert.assertEquals(false, defaultParam.getValue());
  // Mixed-case boolean literals must parse.
  new RecursiveParam("falSe");
  try {
    new RecursiveParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * OverwriteParam: default is false, boolean parsing is case-insensitive,
 * and non-boolean text is rejected.
 */
@Test
public void testOverwriteParam() {
  final OverwriteParam defaultParam = new OverwriteParam(OverwriteParam.DEFAULT);
  Assert.assertEquals(false, defaultParam.getValue());
  // Mixed-case boolean literals must parse.
  new OverwriteParam("trUe");
  try {
    new OverwriteParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * DestinationParam: default is null; destinations must be absolute paths,
 * so a relative path is rejected.
 */
@Test
public void testDestinationParam() {
  final DestinationParam defaultParam = new DestinationParam(DestinationParam.DEFAULT);
  Assert.assertEquals(null, defaultParam.getValue());
  new DestinationParam("/abc");
  try {
    new DestinationParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
// PermissionParam: default expands to octal 0755; the legal range is octal
// 0..1777, so "2000", the non-octal digit "8", negative values, and
// non-numeric text must all be rejected.
@Test public void testPermissionParam(){ final PermissionParam p=new PermissionParam(PermissionParam.DEFAULT); Assert.assertEquals(new FsPermission((short)0755),p.getFsPermission()); new PermissionParam("0"); try { new PermissionParam("-1"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } new PermissionParam("1777"); try { new PermissionParam("2000"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new PermissionParam("8"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new PermissionParam("abc"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// BlockSizeParam: default yields null from getValue(), and getValue(conf)
// must fall back to dfs.blocksize (parsed with getLongBytes, so size
// suffixes apply); 1 byte is legal, 0 is rejected.
@Test public void testBlockSizeParam(){ final BlockSizeParam p=new BlockSizeParam(BlockSizeParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals(conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),p.getValue(conf)); new BlockSizeParam(1L); try { new BlockSizeParam(0L); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * AccessTimeParam: the default maps to -1; -1 is the smallest legal value
 * and anything below it is rejected.
 */
@Test
public void testAccessTimeParam() {
  final AccessTimeParam defaultParam = new AccessTimeParam(AccessTimeParam.DEFAULT);
  Assert.assertEquals(-1L, defaultParam.getValue().longValue());
  // -1 means "unset" and must be accepted.
  new AccessTimeParam(-1L);
  try {
    new AccessTimeParam(-2L);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}

Class: org.apache.hadoop.http.TestHttpRequestLog

NullVerifier EqualityVerifier HybridVerifier 
/**
 * An appender registered under logger "http.requests.&lt;name&gt;" makes
 * HttpRequestLog.getRequestLog hand back an NCSARequestLog instance.
 */
@Test
public void testAppenderDefined() {
  HttpRequestLogAppender appender = new HttpRequestLogAppender();
  appender.setName("testrequestlog");
  Logger.getLogger("http.requests.test").addAppender(appender);
  RequestLog reqLog = HttpRequestLog.getRequestLog("test");
  // Detach again so the appender does not leak into other tests.
  Logger.getLogger("http.requests.test").removeAppender(appender);
  assertNotNull("RequestLog should not be null", reqLog);
  assertEquals("Class mismatch", NCSARequestLog.class, reqLog.getClass());
}

Class: org.apache.hadoop.http.TestHttpServer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Bind-address lifecycle: a stopped server reports no connector address;
// after openListeners() it must rebind to the same port it had before, and
// findPort=true allows the second server to start on an occupied port range.
@Test public void testBindAddress() throws Exception { checkBindAddress("localhost",0,false).stop(); HttpServer2 myServer=checkBindAddress("localhost",0,false); HttpServer2 myServer2=null; try { int port=myServer.getConnectorAddress(0).getPort(); myServer2=checkBindAddress("localhost",port,true); port=myServer2.getConnectorAddress(0).getPort(); myServer2.stop(); assertNull(myServer2.getConnectorAddress(0)); myServer2.openListeners(); assertEquals(port,myServer2.getConnectorAddress(0).getPort()); } finally { myServer.stop(); if (myServer2 != null) { myServer2.stop(); } } }

BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Fires MAX_THREADS*10 concurrent requests (gated by two latches so they
// start together) and checks the server thread pool never exceeds MAX_THREADS.
// NOTE(review): the catch (Exception e) {} inside the worker swallows both
// I/O errors and assertion failures, so a failing assertion in a worker
// cannot fail the test; failures should be collected and re-thrown on the
// main thread. Also, the test returns without awaiting worker completion.
/** * Test the maximum number of threads cannot be exceeded. */ @Test public void testMaxThreads() throws Exception { int clientThreads=MAX_THREADS * 10; Executor executor=Executors.newFixedThreadPool(clientThreads); final CountDownLatch ready=new CountDownLatch(clientThreads); final CountDownLatch start=new CountDownLatch(1); for (int i=0; i < clientThreads; i++) { executor.execute(new Runnable(){ @Override public void run(){ ready.countDown(); try { start.await(); assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echo?a=b&c=d"))); int serverThreads=server.webServer.getThreadPool().getThreads(); assertTrue("More threads are started than expected, Server Threads count: " + serverThreads,serverThreads <= MAX_THREADS); System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = "+ MAX_THREADS); } catch ( Exception e) { } } } ); } ready.await(); start.countDown(); }

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Responses must disable caching: Cache-Control and Pragma are "no-cache",
 * and Expires equals Date so the response is immediately stale.
 */
@Test
public void testNoCacheHeader() throws Exception {
  URL echoUrl = new URL(baseUrl, "/echo?a=b&c=d");
  HttpURLConnection connection = (HttpURLConnection) echoUrl.openConnection();
  assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
  assertEquals("no-cache", connection.getHeaderField("Cache-Control"));
  assertEquals("no-cache", connection.getHeaderField("Pragma"));
  assertNotNull(connection.getHeaderField("Expires"));
  assertNotNull(connection.getHeaderField("Date"));
  assertEquals(connection.getHeaderField("Expires"),
      connection.getHeaderField("Date"));
}

Class: org.apache.hadoop.http.TestHttpServerLifecycle

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Verify that an attribute stored on a running server is readable while the server is alive and is cleared from the webapp context once the server is stopped. * @throws Throwable on failure */ @Test public void testWepAppContextAfterServerStop() throws Throwable { HttpServer2 server=null; String key="test.attribute.key"; String value="test.attribute.value"; server=createTestServer(); assertNotLive(server); server.start(); server.setAttribute(key,value); assertAlive(server); assertEquals(value,server.getAttribute(key)); stop(server); assertNull("Server context should have cleared",server.getAttribute(key)); }

Class: org.apache.hadoop.http.TestServletFilter

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * A filter whose init() throws must make HttpServer2.start() fail with an
 * IOException describing the handler failure.
 */
@Test
public void testServletFilterWhenInitThrowsException() throws Exception {
  Configuration config = new Configuration();
  config.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      ErrorFilter.Initializer.class.getName());
  HttpServer2 server = createTestServer(config);
  try {
    server.start();
    fail("expecting exception");
  } catch (IOException e) {
    assertTrue(e.getMessage().contains(
        "Problem in starting http server. Server handlers failed"));
  }
}

Class: org.apache.hadoop.io.TestArrayPrimitiveWritable

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Reads back an int[] written by ObjectWritable in the legacy non-compact
// format: class-name label, length, then each element as a boxed Integer.
// NOTE(review): the loop variable `i` shadows the int[] field `i` used
// elsewhere in the method — legal but confusing; consider renaming.
@Test public void testOldFormat() throws IOException { ObjectWritable.writeObject(out,i,i.getClass(),null); in.reset(out.getData(),out.getLength()); @SuppressWarnings("deprecation") String className=UTF8.readString(in); assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className); int length=in.readInt(); assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length); int[] readValue=new int[length]; try { for (int i=0; i < length; i++) { readValue[i]=(int)((Integer)ObjectWritable.readObject(in,null)); } } catch ( Exception e) { fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e)); } assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Checks the class labels ObjectWritable writes: a raw int[] must be
// labelled ArrayPrimitiveWritable.Internal, while an explicit
// ArrayPrimitiveWritable is labelled with its own class name, and both
// deserialize back to the original values.
// NOTE(review): the first component-type assertion checks the writer-side
// `apw` although its message refers to the deserialized Internal instance;
// presumably `apwi.getComponentType()` was intended — confirm.
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException { ObjectWritable.writeObject(out,i,i.getClass(),null,true); ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i); ObjectWritable.writeObject(out,apw,apw.getClass(),null,true); in.reset(out.getData(),out.getLength()); String className=UTF8.readString(in); assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className); ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal(); apwi.readFields(in); assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apw.getComponentType()); assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get()))); String declaredClassName=UTF8.readString(in); assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName); className=UTF8.readString(in); assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className); ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable(); apw2.readFields(in); assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType()); assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get()))); }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes every array in bigSet twice (once via ObjectWritable, once via
// ArrayPrimitiveWritable), reads the interleaved pairs back in the same
// order, and checks component types and values against expectedResultSet.
@Test public void testMany() throws IOException { for ( Object x : bigSet) { ObjectWritable.writeObject(out,x,x.getClass(),null,true); (new ArrayPrimitiveWritable(x)).write(out); } in.reset(out.getData(),out.getLength()); for (int x=0; x < resultSet.length; ) { resultSet[x++]=ObjectWritable.readObject(in,null); ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(); apw.readFields(in); resultSet[x++]=apw.get(); } assertEquals(expectedResultSet.length,resultSet.length); for (int x=0; x < resultSet.length; x++) { assertEquals("ComponentType of array " + x,expectedResultSet[x].getClass().getComponentType(),resultSet[x].getClass().getComponentType()); } assertTrue("In and Out arrays didn't match values",Arrays.deepEquals(expectedResultSet,resultSet)); }

Class: org.apache.hadoop.io.TestBooleanWritable

BooleanVerifier EqualityVerifier HybridVerifier 
// equals()/hashCode()/compareTo()/toString() contract for BooleanWritable:
// equal values are equal with equal hashes, true > false in compareTo,
// and toString renders the boolean literal.
/** * test {@link BooleanWritable} methods hashCode(), equals(), compareTo() */ @Test public void testCommonMethods(){ assertTrue("testCommonMethods1 error !!!",newInstance(true).equals(newInstance(true))); assertTrue("testCommonMethods2 error !!!",newInstance(false).equals(newInstance(false))); assertFalse("testCommonMethods3 error !!!",newInstance(false).equals(newInstance(true))); assertTrue("testCommonMethods4 error !!!",checkHashCode(newInstance(true),newInstance(true))); assertFalse("testCommonMethods5 error !!! ",checkHashCode(newInstance(true),newInstance(false))); assertTrue("testCommonMethods6 error !!!",newInstance(true).compareTo(newInstance(false)) > 0); assertTrue("testCommonMethods7 error !!!",newInstance(false).compareTo(newInstance(true)) < 0); assertTrue("testCommonMethods8 error !!!",newInstance(false).compareTo(newInstance(false)) == 0); assertEquals("testCommonMethods9 error !!!","true",newInstance(true).toString()); }

Class: org.apache.hadoop.io.TestBytesWritable

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// The zero-copy constructor/set must alias (not copy) the caller's array
// (checked with ==) yet stay equal/comparable/hash-identical to a copying
// BytesWritable; the final set(bytes,...) re-points the backing array back
// after temporarily pointing at the larger buffer.
/** * This test was written as result of adding the new zero * copy constructor and set method to BytesWritable. These * methods allow users to specify the backing buffer of the * BytesWritable instance and a length. */ @Test public void testZeroCopy(){ byte[] bytes="brock".getBytes(); BytesWritable zeroBuf=new BytesWritable(bytes,bytes.length); BytesWritable copyBuf=new BytesWritable(bytes); assertTrue("copy took place, backing array != array passed to constructor",bytes == zeroBuf.getBytes()); assertTrue("length of BW should backing byte array",zeroBuf.getLength() == bytes.length); assertEquals("objects with same backing array should be equal",zeroBuf,copyBuf); assertEquals("string repr of objects with same backing array should be equal",zeroBuf.toString(),copyBuf.toString()); assertTrue("compare order objects with same backing array should be equal",zeroBuf.compareTo(copyBuf) == 0); assertTrue("hash of objects with same backing array should be equal",zeroBuf.hashCode() == copyBuf.hashCode()); byte[] buffer=new byte[bytes.length * 5]; zeroBuf.set(buffer,0,buffer.length); zeroBuf.set(bytes,0,bytes.length); assertEquals("buffer created with (array, len) has bad contents",zeroBuf,copyBuf); assertTrue("buffer created with (array, len) has bad length",zeroBuf.getLength() == copyBuf.getLength()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// setSize() growth/shrink semantics: growing must reallocate (capacity
// changes) while preserving existing bytes (the arraycopy duplicates the
// prefix into the newly grown region before checking), and setCapacity(1)
// truncates length to 1 while keeping the first byte.
@Test public void testSizeChange() throws Exception { byte[] hadoop="hadoop".getBytes(); BytesWritable buf=new BytesWritable(hadoop); int size=buf.getLength(); int orig_capacity=buf.getCapacity(); buf.setSize(size * 2); int new_capacity=buf.getCapacity(); System.arraycopy(buf.getBytes(),0,buf.getBytes(),size,size); assertTrue(new_capacity >= size * 2); assertEquals(size * 2,buf.getLength()); assertTrue(new_capacity != orig_capacity); buf.setSize(size * 4); assertTrue(new_capacity != buf.getCapacity()); for (int i=0; i < size * 2; ++i) { assertEquals(hadoop[i % size],buf.getBytes()[i]); } assertEquals(size * 4,buf.copyBytes().length); buf.setCapacity(1); assertEquals(1,buf.getLength()); assertEquals(hadoop[0],buf.getBytes()[0]); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// ByteWritable contract: get() returns the set byte, compareTo orders by
// byte value, equals matches only same-type same-value instances (an
// IntWritable never compares equal), and toString renders the decimal value.
/** * test {@link ByteWritable} * methods compareTo(), toString(), equals() */ @Test public void testObjectCommonMethods(){ byte b=0x9; ByteWritable bw=new ByteWritable(); bw.set(b); assertTrue("testSetByteWritable error",bw.get() == b); assertTrue("testSetByteWritable error < 0",bw.compareTo(new ByteWritable((byte)0xA)) < 0); assertTrue("testSetByteWritable error > 0",bw.compareTo(new ByteWritable((byte)0x8)) > 0); assertTrue("testSetByteWritable error == 0",bw.compareTo(new ByteWritable((byte)0x9)) == 0); assertTrue("testSetByteWritable equals error !!!",bw.equals(new ByteWritable((byte)0x9))); assertTrue("testSetByteWritable equals error !!!",!bw.equals(new ByteWritable((byte)0xA))); assertTrue("testSetByteWritable equals error !!!",!bw.equals(new IntWritable(1))); assertEquals("testSetByteWritable error ","9",bw.toString()); }

Class: org.apache.hadoop.io.TestIOUtils

UtilityVerifier EqualityVerifier HybridVerifier 
// IOUtils.skipFully on a 5-byte stream: skipping past EOF must throw
// EOFException whose message reports exactly how many bytes were skipped
// before EOF (1, 5, and 0 bytes respectively); mark/reset rewinds between
// the scenarios.
@Test public void testSkipFully() throws IOException { byte inArray[]=new byte[]{0,1,2,3,4}; ByteArrayInputStream in=new ByteArrayInputStream(inArray); try { in.mark(inArray.length); IOUtils.skipFully(in,2); IOUtils.skipFully(in,2); try { IOUtils.skipFully(in,2); fail("expected to get a PrematureEOFException"); } catch ( EOFException e) { assertEquals("Premature EOF from inputStream " + "after skipping 1 byte(s).",e.getMessage()); } in.reset(); try { IOUtils.skipFully(in,20); fail("expected to get a PrematureEOFException"); } catch ( EOFException e) { assertEquals("Premature EOF from inputStream " + "after skipping 5 byte(s).",e.getMessage()); } in.reset(); IOUtils.skipFully(in,5); try { IOUtils.skipFully(in,10); fail("expected to get a PrematureEOFException"); } catch ( EOFException e) { assertEquals("Premature EOF from inputStream " + "after skipping 0 byte(s).",e.getMessage()); } } finally { in.close(); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// wrappedReadForCompressedData must pass through a normal read result and
// wrap a java.lang.InternalError from the underlying stream in an
// IOException mentioning "Error while reading compressed data".
@Test public void testWrappedReadForCompressedData() throws IOException { byte[] buf=new byte[2]; InputStream mockStream=Mockito.mock(InputStream.class); Mockito.when(mockStream.read(buf,0,1)).thenReturn(1); Mockito.when(mockStream.read(buf,0,2)).thenThrow(new java.lang.InternalError()); try { assertEquals("Check expected value",1,IOUtils.wrappedReadForCompressedData(mockStream,buf,0,1)); } catch ( IOException ioe) { fail("Unexpected error while reading"); } try { IOUtils.wrappedReadForCompressedData(mockStream,buf,0,2); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("Error while reading compressed data",ioe); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * {@code IOUtils.copyBytes(in, out, count, close)} must propagate an
 * exception raised while closing the output stream, yet still close both
 * streams.
 */
@Test
public void testCopyBytesWithCountShouldThrowOutTheStreamClosureExceptions() throws Exception {
  final InputStream in = Mockito.mock(InputStream.class);
  final OutputStream out = Mockito.mock(OutputStream.class);
  Mockito.doReturn(-1).when(in).read(new byte[4096], 0, 1);
  Mockito.doThrow(new IOException("Exception in closing the stream")).when(out).close();
  try {
    IOUtils.copyBytes(in, out, (long) 1, true);
    fail("Should throw out the exception");
  } catch (IOException e) {
    assertEquals("Not throwing the expected exception.",
        "Exception in closing the stream", e.getMessage());
  }
  // Both streams must still have been closed despite the failure.
  Mockito.verify(in, Mockito.atLeastOnce()).close();
  Mockito.verify(out, Mockito.atLeastOnce()).close();
}

Class: org.apache.hadoop.io.TestMapFile

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * test {@code MapFile.Writer} constructor with IOException
 */
@Test
public void testPathExplosionWriterCreation() {
  Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
  String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory " + path.getName();
  MapFile.Writer writer = null;
  try {
    // Spy on the filesystem so mkdirs() fails with a known message.
    FileSystem fsSpy = spy(FileSystem.get(conf));
    Path pathSpy = spy(path);
    when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
    when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
    writer = new MapFile.Writer(conf, pathSpy,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(IntWritable.class));
    fail("fail in testPathExplosionWriterCreation !!!");
  } catch (IOException ex) {
    // BUGFIX: JUnit's assertEquals takes (message, expected, actual);
    // the arguments were reversed.
    assertEquals("testPathExplosionWriterCreation ex message error !!!",
        TEST_ERROR_MESSAGE, ex.getMessage());
  } catch (Exception e) {
    fail("fail in testPathExplosionWriterCreation. Other ex !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * test throwing {@code IOException} in {@code MapFile.Writer} constructor
 */
@Test
public void testWriteWithFailDirCreation() {
  final String ERROR_MESSAGE = "Mkdirs failed to create directory";
  final Path dirName = new Path(TEST_DIR, "fail.mapfile");
  MapFile.Writer writer = null;
  try {
    // Stub the filesystem so the directory creation reports failure.
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    Path pathSpy = spy(dirName);
    when(pathSpy.getFileSystem(conf)).thenReturn(spyFs);
    when(spyFs.mkdirs(dirName)).thenReturn(false);
    writer = new MapFile.Writer(conf, pathSpy,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    fail("testWriteWithFailDirCreation error !!!");
  } catch (IOException ex) {
    assertTrue("testWriteWithFailDirCreation ex error !!!",
        ex.getMessage().startsWith(ERROR_MESSAGE));
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * test {@code MapFile.Reader.getClosest()} method
 */
@Test
public void testGetClosestOnCurrentApi() throws Exception {
  final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_PREFIX, Text.class, Text.class);
    // Keys are "1","11","21",...,"91" in text ordering.
    int FIRST_KEY = 1;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      Text t = new Text(Integer.toString(i));
      writer.append(t, t);
    }
    writer.close();
    reader = createReader(TEST_PREFIX, Text.class);
    // "55" sits between entries: after -> "61", before -> "51".
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text) reader.getClosest(key, value);
    assertEquals(new Text("61"), closest);
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("51"), closest);
    // An exact key must be returned unchanged.
    final Text explicitKey = new Text("21");
    closest = (Text) reader.getClosest(explicitKey, value);
    // BUGFIX: the original compared the key with itself
    // (assertEquals(new Text("21"), explicitKey)); it must check the
    // value actually returned by getClosest().
    assertEquals(new Text("21"), closest);
    // Before the first key: forward search lands on FIRST_KEY.
    key = new Text("00");
    closest = (Text) reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
    // After the last key: forward finds nothing, backward finds "91".
    key = new Text("92");
    closest = (Text) reader.getClosest(key, value);
    assertNull("Not null key in testGetClosestWithNewCode", closest);
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("91"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

BranchVerifier TestInitializer UtilityVerifier HybridVerifier 
/**
 * Start each test from a clean, freshly-created test root directory.
 */
@Before
public void setup() throws Exception {
  final LocalFileSystem localFs = FileSystem.getLocal(conf);
  if (localFs.exists(TEST_DIR)) {
    if (!localFs.delete(TEST_DIR, true)) {
      Assert.fail("Can't clean up test root dir");
    }
  }
  localFs.mkdirs(TEST_DIR);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * test {@code MapFile.Reader.finalKey()} method
 */
@Test
public void testOnFinalKey() {
  final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
  int SIZE = 10;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class, IntWritable.class);
    for (int i = 0; i < SIZE; i++)
      writer.append(new IntWritable(i), new IntWritable(i));
    writer.close();
    reader = createReader(TEST_METHOD_KEY, IntWritable.class);
    // finalKey() fills the passed Writable with the last key in the map.
    IntWritable actualKey = new IntWritable(0);
    reader.finalKey(actualKey);
    // BUGFIX: expected value (9, the last appended key) goes first;
    // the arguments were reversed.
    assertEquals("testOnFinalKey not same !!!", new IntWritable(9), actualKey);
  } catch (IOException ex) {
    fail("testOnFinalKey error !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * test {@code MapFile.rename()}
 * method with throwing {@code IOException}
 */
@Test
public void testRenameWithException() {
  final String ERROR_MESSAGE = "Can't rename file";
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
    Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
    // Stub rename() to throw a known exception.
    when(spyFs.rename(oldDir, newDir)).thenThrow(new IOException(ERROR_MESSAGE));
    MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
    fail("testRenameWithException no exception error !!!");
  } catch (IOException ex) {
    // BUGFIX: JUnit's assertEquals takes (message, expected, actual);
    // the arguments were reversed.
    assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
        ERROR_MESSAGE, ex.getMessage());
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test getClosest feature.
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
  Path dirName=new Path(TEST_DIR,"testGetClosest.mapfile");
  FileSystem fs=FileSystem.getLocal(conf);
  Path qualifiedDirName=fs.makeQualified(dirName);
  // Small index interval so the reader's index is actually exercised.
  MapFile.Writer.setIndexInterval(conf,3);
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=new MapFile.Writer(conf,fs,qualifiedDirName.toString(),Text.class,Text.class);
    assertEquals(3,writer.getIndexInterval());
    // Keys are "10","20",...,"90", zero-padded to two characters.
    final int FIRST_KEY=10;
    for (int i=FIRST_KEY; i < 100; i+=10) {
      String iStr=Integer.toString(i);
      Text t=new Text("00".substring(iStr.length()) + iStr);
      writer.append(t,t);
    }
    writer.close();
    reader=new MapFile.Reader(qualifiedDirName,conf);
    // "55" sits between entries: closest-after is "60", closest-before is "50".
    Text key=new Text("55");
    Text value=new Text();
    Text closest=(Text)reader.getClosest(key,value);
    assertEquals(new Text("60"),closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("50"),closest);
    // An exact match is returned as-is in both directions.
    final Text TWENTY=new Text("20");
    closest=(Text)reader.getClosest(TWENTY,value);
    assertEquals(TWENTY,closest);
    closest=(Text)reader.getClosest(TWENTY,value,true);
    assertEquals(TWENTY,closest);
    // Before the first key: forward search finds FIRST_KEY, backward finds nothing.
    key=new Text("00");
    closest=(Text)reader.getClosest(key,value);
    assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
    closest=(Text)reader.getClosest(key,value,true);
    assertNull(closest);
    // After the last key: forward search finds nothing, backward finds "90".
    key=new Text("99");
    closest=(Text)reader.getClosest(key,value);
    assertNull(closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("90"),closest);
  } finally {
    IOUtils.cleanup(null,writer,reader);
  }
}

UtilityVerifier NullVerifier HybridVerifier 
/**
 * test {@code MapFile.Writer} constructor with key, value
 * and validate it with {@code keyClass(), valueClass()} methods
 */
@Test
public void testKeyValueClasses() {
  // BUGFIX: the class-literal declarations were garbled ("Class>" and a
  // raw "Class") and did not compile; use properly bounded literals.
  Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
  Class<?> valueClass = Text.class;
  try {
    createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class).close();
    // The option factories must accept the classes and return non-null options.
    assertNotNull("writer key class null error !!!", MapFile.Writer.keyClass(keyClass));
    assertNotNull("writer value class null error !!!", MapFile.Writer.valueClass(valueClass));
  } catch (IOException ex) {
    fail(ex.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * test all available constructor for {@code MapFile.Writer}
 */
@Test
@SuppressWarnings("deprecation")
public void testDeprecatedConstructors(){
  String path=new Path(TEST_DIR,"writes.mapfile").toString();
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    FileSystem fs=FileSystem.getLocal(conf);
    // (conf, fs, path, keyClass, valueClass, compressionType)
    writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD);
    assertNotNull(writer);
    writer.close();
    // ... plus a Progressable.
    writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // ... plus a CompressionCodec and Progressable.
    writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultCodec,defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // (conf, fs, path, comparator, valueClass)
    writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class);
    assertNotNull(writer);
    writer.close();
    // ... plus a compression type.
    writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,SequenceFile.CompressionType.RECORD);
    assertNotNull(writer);
    writer.close();
    // ... plus a Progressable.
    writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,CompressionType.RECORD,defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // Deprecated reader constructor taking an explicit comparator.
    reader=new MapFile.Reader(fs,path,WritableComparator.get(IntWritable.class),conf);
    assertNotNull(reader);
    assertNotNull("reader key is null !!!",reader.getKeyClass());
    assertNotNull("reader value in null",reader.getValueClass());
  } catch ( IOException e) {
    fail(e.getMessage());
  } finally {
    IOUtils.cleanup(null,writer,reader);
  }
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * test {@code MapFile.Reader.next(key, value)} for iteration.
 */
@Test
public void testReaderKeyIteration() {
  final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
  final int SIZE = 10;
  final int ITERATIONS = 5;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
    int start = 0;
    for (int entry = 0; entry < SIZE; entry++) {
      writer.append(new IntWritable(entry), new Text("Value:" + entry));
    }
    writer.close();
    reader = createReader(TEST_METHOD_KEY, IntWritable.class);
    Writable startValue = new Text("Value:" + start);
    // Walk the whole file several times, resetting between passes.
    for (int pass = 0; pass < ITERATIONS; pass++) {
      IntWritable key = new IntWritable(start);
      Writable value = startValue;
      while (reader.next(key, value)) {
        assertNotNull(key);
        assertNotNull(value);
      }
      reader.reset();
    }
    // seek() succeeds for an existing key, fails for a missing one.
    assertTrue("reader seek error !!!", reader.seek(new IntWritable(SIZE / 2)));
    assertFalse("reader seek error !!!", reader.seek(new IntWritable(SIZE * 2)));
  } catch (IOException ex) {
    fail("reader seek error !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * test {@code MapFile.fix} rebuilding a map file whose index was deleted
 */
@Test
public void testFix() {
  final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
  final int PAIR_SIZE = 20;
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
    writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
    for (int entry = 0; entry < PAIR_SIZE; entry++) {
      writer.append(new IntWritable(0), new Text("value"));
    }
    writer.close();
    // Remove the index file so fix() has something to rebuild.
    File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
    boolean isDeleted = false;
    if (indexFile.exists()) {
      isDeleted = indexFile.delete();
    }
    if (isDeleted) {
      assertTrue("testFix error !!!",
          MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
    }
  } catch (Exception ex) {
    fail("testFix error !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * test {@code MapFile.rename()} when the filesystem's rename() returns
 * false: an IOException starting with "Could not rename" is expected.
 */
@Test
public void testRenameWithFalse() {
  final String ERROR_MESSAGE = "Could not rename";
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
    Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
    when(spyFs.rename(oldDir, newDir)).thenReturn(false);
    MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
    // BUGFIX: the failure message was copy-pasted from
    // testRenameWithException and named the wrong test.
    fail("testRenameWithFalse no exception error !!!");
  } catch (IOException ex) {
    assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!",
        ex.getMessage().startsWith(ERROR_MESSAGE));
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

Class: org.apache.hadoop.io.TestSecureIOUtils

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * Each secure-open variant must reject a caller whose expected owner does
 * not match the file's real owner. Requires the native IO library.
 */
@Test(timeout = 10000)
public void testReadIncorrectlyRestrictedWithSecurity() throws IOException {
  assumeTrue(NativeIO.isAvailable());
  System.out.println("Running test with native libs...");
  String invalidUser = "InvalidUser";
  try {
    SecureIOUtils.forceSecureOpenForRead(testFilePathIs, invalidUser, realGroup).close();
    // BUGFIX: "expection" typo fixed in all three failure messages.
    fail("Didn't throw exception for wrong user ownership!");
  } catch (IOException ioe) {
    // expected
  }
  try {
    SecureIOUtils.forceSecureOpenFSDataInputStream(testFilePathFadis, invalidUser, realGroup).close();
    fail("Didn't throw exception for wrong user ownership!");
  } catch (IOException ioe) {
    // expected
  }
  try {
    SecureIOUtils.forceSecureOpenForRandomRead(testFilePathRaf, "r", invalidUser, realGroup).close();
    fail("Didn't throw exception for wrong user ownership!");
  } catch (IOException ioe) {
    // expected
  }
}

Class: org.apache.hadoop.io.TestSortedMapWritable

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * putAll() must copy both the entries and the internal class-id mappings
 * from the source map.
 */
@Test(timeout = 1000)
public void testPutAll() {
  final SortedMapWritable source = new SortedMapWritable();
  final SortedMapWritable target = new SortedMapWritable();
  source.put(new Text("key"), new Text("value"));
  target.putAll(source);
  assertEquals("map1 entries don't match map2 entries", source, target);
  assertTrue("map2 doesn't have class information from map1",
      target.classToIdMap.containsKey(Text.class)
          && target.idToClassMap.containsValue(Text.class));
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end exercise of {@code SortedMapWritable}: first/last key order,
 * the copy constructor, and copies of nested maps-of-maps.
 */
@Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable(){
  Text[] keys={new Text("key1"),new Text("key2"),new Text("key3")};
  BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes()),new BytesWritable("value3".getBytes())};
  SortedMapWritable inMap=new SortedMapWritable();
  for (int i=0; i < keys.length; i++) {
    inMap.put(keys[i],values[i]);
  }
  // Sorted order: first/last keys match the insertion extremes.
  assertEquals(0,inMap.firstKey().compareTo(keys[0]));
  assertEquals(0,inMap.lastKey().compareTo(keys[2]));
  // Copy constructor preserves size and every entry.
  SortedMapWritable outMap=new SortedMapWritable(inMap);
  assertEquals(inMap.size(),outMap.size());
  for ( Map.Entry e : inMap.entrySet()) {
    assertTrue(outMap.containsKey(e.getKey()));
    assertEquals(0,((WritableComparable)outMap.get(e.getKey())).compareTo(e.getValue()));
  }
  // Nested maps: copying a map of maps must carry over each inner map.
  Text[] maps={new Text("map1"),new Text("map2")};
  SortedMapWritable mapOfMaps=new SortedMapWritable();
  mapOfMaps.put(maps[0],inMap);
  mapOfMaps.put(maps[1],outMap);
  SortedMapWritable copyOfMapOfMaps=new SortedMapWritable(mapOfMaps);
  for (int i=0; i < maps.length; i++) {
    assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
    SortedMapWritable a=(SortedMapWritable)mapOfMaps.get(maps[i]);
    SortedMapWritable b=(SortedMapWritable)copyOfMapOfMaps.get(maps[i]);
    assertEquals(a.size(),b.size());
    for ( Writable key : a.keySet()) {
      assertTrue(b.containsKey(key));
      WritableComparable aValue=(WritableComparable)a.get(key);
      WritableComparable bValue=(WritableComparable)b.get(key);
      assertEquals(0,aValue.compareTo(bValue));
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests if equal and hashCode method still hold the contract:
 * equal maps (regardless of insertion order) share a hash code, maps with
 * different content differ, and equals() is symmetric and null-safe.
 */
@Test
public void testEqualsAndHashCode(){
  String failureReason;
  SortedMapWritable mapA=new SortedMapWritable();
  SortedMapWritable mapB=new SortedMapWritable();
  failureReason="SortedMapWritable couldn't be initialized. Got null reference";
  assertNotNull(failureReason,mapA);
  assertNotNull(failureReason,mapB);
  // Basic equals() contract: null-safe and reflexive for empty maps.
  assertFalse("equals method returns true when passed null",mapA.equals(null));
  assertTrue("Two empty SortedMapWritables are no longer equal",mapA.equals(mapB));
  Text[] keys={new Text("key1"),new Text("key2")};
  BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes())};
  // Different single entries: unequal in both directions.
  mapA.put(keys[0],values[0]);
  mapB.put(keys[1],values[1]);
  failureReason="Two SortedMapWritables with different data are now equal";
  assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
  assertTrue(failureReason,!mapA.equals(mapB));
  assertTrue(failureReason,!mapB.equals(mapA));
  // Same entry set built in different insertion order: equal.
  mapA.put(keys[1],values[1]);
  mapB.put(keys[0],values[0]);
  failureReason="Two SortedMapWritables with same entry sets formed in different order are now different";
  assertEquals(failureReason,mapA.hashCode(),mapB.hashCode());
  assertTrue(failureReason,mapA.equals(mapB));
  assertTrue(failureReason,mapB.equals(mapA));
  // Swap the values under mapA's keys: content differs again.
  mapA.put(keys[0],values[1]);
  mapA.put(keys[1],values[0]);
  failureReason="Two SortedMapWritables with different content are now equal";
  assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
  assertTrue(failureReason,!mapA.equals(mapB));
  assertTrue(failureReason,!mapB.equals(mapA));
}

Class: org.apache.hadoop.io.compress.TestCodec

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Write a gzip file with the JDK's GZIPOutputStream, then read it back
 * through the codec factory using the pure-Java zlib implementation.
 */
@Test
public void testGzipCodecRead() throws IOException {
  // Force the built-in (non-native) zlib implementation.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write the fixture file.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final String msg = "This is the message in the file!";
  bw.write(msg);
  bw.close();
  // Read it back through the codec chosen by file extension.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
  // BUGFIX: return the pooled decompressor; it was leaked before.
  CodecPool.returnDecompressor(decompressor);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Data written by the JDK's GZIPOutputStream must round-trip through the
 * built-in (pure Java) gzip decompressor.
 */
@Test
public void testGzipCompatibility() throws IOException {
  // Random payload; the seed is logged for reproducibility.
  final Random rand = new Random();
  final long seed = rand.nextLong();
  rand.setSeed(seed);
  LOG.info("seed: " + seed);
  final DataOutputBuffer dflbuf = new DataOutputBuffer();
  final GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  final byte[] payload = new byte[rand.nextInt(128 * 1024 + 1)];
  rand.nextBytes(payload);
  gzout.write(payload);
  gzout.close();
  final DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
  // Disable native libs so the built-in decompressor is selected.
  final Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  final CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  final Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
  final InputStream gzin = codec.createInputStream(gzbuf, decom);
  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] roundTripped = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(payload, roundTripped);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Decompress a gzip file whose uncompressed size exceeds 32 bits
 * (~4GB of zero bytes) with the built-in zlib implementation.
 */
@Test
public void testGzipLongOverflow() throws IOException {
  LOG.info("testGzipLongOverflow");
  // Force the built-in (pure Java) zlib implementation.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write (4K + 1) x 1MB of '\0' characters, gzip-compressed.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipLongOverflow.bin.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF = 1024 * 4 + 1;
  final char[] buf = new char[1024 * 1024];
  for (int i = 0; i < buf.length; i++)
    buf[i] = '\0';
  for (int i = 0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();
  // Read everything back and verify length and content of each chunk.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  for (int j = 0; j < NBUF; j++) {
    int n = br.read(buf);
    // BUGFIX: assertEquals takes (message, expected, actual); both
    // assertions below had the arguments reversed.
    assertEquals("got wrong read length!", buf.length, n);
    for (int i = 0; i < buf.length; i++)
      assertEquals("got wrong byte!", '\0', buf[i]);
  }
  br.close();
}

Class: org.apache.hadoop.io.compress.lz4.TestLz4CompressorDecompressor

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a random buffer through Lz4Compressor/Lz4Decompressor and
 * check the byte counters, the finished flag and the restored payload.
 */
@Test
public void testCompressDecompress() {
  final int BYTE_SIZE = 1024 * 54;
  final byte[] original = generate(BYTE_SIZE);
  final Lz4Compressor lz4Compressor = new Lz4Compressor();
  try {
    lz4Compressor.setInput(original, 0, original.length);
    assertTrue("Lz4CompressDecompress getBytesRead error !!!",
        lz4Compressor.getBytesRead() > 0);
    assertTrue("Lz4CompressDecompress getBytesWritten before compress error !!!",
        lz4Compressor.getBytesWritten() == 0);
    final byte[] compressed = new byte[BYTE_SIZE];
    final int compressedSize = lz4Compressor.compress(compressed, 0, compressed.length);
    assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!",
        lz4Compressor.getBytesWritten() > 0);
    final Lz4Decompressor lz4Decompressor = new Lz4Decompressor();
    lz4Decompressor.setInput(compressed, 0, compressedSize);
    final byte[] restored = new byte[BYTE_SIZE];
    lz4Decompressor.decompress(restored, 0, restored.length);
    assertTrue("testLz4CompressDecompress finished error !!!", lz4Decompressor.finished());
    assertArrayEquals(original, restored);
    lz4Compressor.reset();
    lz4Decompressor.reset();
    assertTrue("decompressor getRemaining error !!!", lz4Decompressor.getRemaining() == 0);
  } catch (Exception e) {
    fail("testLz4CompressDecompress ex error!!!");
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a buffer through BlockCompressorStream/BlockDecompressorStream
 * backed by the lz4 compressor pair.
 */
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = generate(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  try {
    // Compress into an in-memory buffer.
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize, compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    // Decompress the buffer back.
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
        compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result = new byte[BYTE_SIZE];
    // BUGFIX: a single read() may return fewer bytes than requested;
    // readFully() guarantees the whole buffer is filled.
    inflateIn.readFully(result);
    // BUGFIX: assertArrayEquals takes (message, expected, actual);
    // the arguments were reversed.
    assertArrayEquals("original array not equals compress/decompressed array", bytes, result);
  } catch (IOException e) {
    fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
    }
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier HybridVerifier 
/**
 * Input larger than the default lz4 compressor buffer (64KB + 1) must
 * still compress to a non-empty output.
 */
@Test
public void testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize() {
  final int BYTES_SIZE = 1024 * 64 + 1;
  try {
    final Lz4Compressor compressor = new Lz4Compressor();
    final byte[] input = generate(BYTES_SIZE);
    assertTrue("needsInput error !!!", compressor.needsInput());
    compressor.setInput(input, 0, input.length);
    final byte[] output = new byte[BYTES_SIZE];
    final int written = compressor.compress(output, 0, input.length);
    assertTrue("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize error !!!",
        written != 0);
  } catch (Exception ex) {
    fail("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize ex error !!!");
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Compressing an empty stream emits only the 4-byte block header, and
 * decompressing that header immediately reports end-of-stream.
 */
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  BlockDecompressorStream decompressorStream = null;
  try {
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream compressorStream =
        new BlockCompressorStream(bytesOut, new Lz4Compressor(), 1024, 0);
    compressorStream.close();
    byte[] compressed = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, compressed.length);
    bytesIn = new ByteArrayInputStream(compressed);
    decompressorStream = new BlockDecompressorStream(bytesIn, new Lz4Decompressor(), 1024);
    assertEquals("return value is not -1", -1, decompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
  } finally {
    if (decompressorStream != null) {
      try {
        bytesIn.close();
        bytesOut.close();
        decompressorStream.close();
      } catch (IOException e) {
      }
    }
  }
}

TestInitializer AssumptionSetter HybridVerifier 
// Skip the whole test class unless the lz4 native library is loaded;
// the compressor/decompressor under test depends on it.
@Before
public void before(){
  assumeTrue(Lz4Codec.isNativeCodeLoaded());
}

Class: org.apache.hadoop.io.compress.snappy.TestSnappyCompressorDecompressor

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a random buffer through SnappyCompressor/SnappyDecompressor
 * and check the byte counters, the finished flag and the restored payload.
 */
@Test
public void testSnappyCompressDecompress() {
  final int BYTE_SIZE = 1024 * 54;
  final byte[] original = BytesGenerator.get(BYTE_SIZE);
  final SnappyCompressor compressor = new SnappyCompressor();
  try {
    compressor.setInput(original, 0, original.length);
    assertTrue("SnappyCompressDecompress getBytesRead error !!!",
        compressor.getBytesRead() > 0);
    assertTrue("SnappyCompressDecompress getBytesWritten before compress error !!!",
        compressor.getBytesWritten() == 0);
    final byte[] compressed = new byte[BYTE_SIZE];
    final int compressedSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue("SnappyCompressDecompress getBytesWritten after compress error !!!",
        compressor.getBytesWritten() > 0);
    final SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
    decompressor.setInput(compressed, 0, compressedSize);
    final byte[] restored = new byte[BYTE_SIZE];
    decompressor.decompress(restored, 0, restored.length);
    assertTrue("testSnappyCompressDecompress finished error !!!", decompressor.finished());
    Assert.assertArrayEquals(original, restored);
    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!", decompressor.getRemaining() == 0);
  } catch (Exception e) {
    fail("testSnappyCompressDecompress ex error!!!");
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a buffer through BlockCompressorStream/BlockDecompressorStream
 * backed by the snappy compressor pair.
 */
@Test
public void testSnappyCompressorDecopressorLogicWithCompressionStreams() {
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = BytesGenerator.get(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  try {
    // Compress into an in-memory buffer.
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new SnappyCompressor(bufferSize), bufferSize, compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    // Decompress the buffer back.
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
        compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new SnappyDecompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result = new byte[BYTE_SIZE];
    // BUGFIX: a single read() may return fewer bytes than requested;
    // readFully() guarantees the whole buffer is filled.
    inflateIn.readFully(result);
    // BUGFIX: assertArrayEquals takes (message, expected, actual);
    // the arguments were reversed.
    Assert.assertArrayEquals("original array not equals compress/decompressed array",
        bytes, result);
  } catch (IOException e) {
    fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
    }
  }
}

UtilityVerifier AssumptionSetter HybridVerifier 
/**
 * Exercise direct block compression across several buffer sizes.
 */
@Test
public void testSnappyDirectBlockCompression() {
  final int[] blockSizes = { 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
  assumeTrue(SnappyCodec.isNativeCodeLoaded());
  try {
    for (int blockSize : blockSizes) {
      compressDecompressLoop(blockSize);
    }
  } catch (IOException ex) {
    fail("testSnappyDirectBlockCompression ex !!!" + ex);
  }
}

TestInitializer AssumptionSetter HybridVerifier 
// Skip the whole test class unless the snappy native library is loaded;
// the compressor/decompressor under test depends on it.
@Before
public void before(){
  assumeTrue(SnappyCodec.isNativeCodeLoaded());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Feed a 50KB random buffer into the snappy compressor in chunks of at
 * most (BLOCK_SIZE - 18) bytes and check that compressed output was
 * produced.
 */
@Test
public void testSnappyBlockCompression(){
  int BYTE_SIZE=1024 * 50;
  int BLOCK_SIZE=512;
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  byte[] block=new byte[BLOCK_SIZE];
  byte[] bytes=BytesGenerator.get(BYTE_SIZE);
  try {
    SnappyCompressor compressor=new SnappyCompressor();
    int off=0;
    int len=BYTE_SIZE;
    // NOTE(review): the 18 bytes look like per-block headroom for
    // compression overhead -- confirm against SnappyCompressor internals.
    int maxSize=BLOCK_SIZE - 18;
    if (BYTE_SIZE > maxSize) {
      do {
        // Feed at most maxSize bytes, then drain the compressor fully
        // before resetting for the next chunk.
        int bufLen=Math.min(len,maxSize);
        compressor.setInput(bytes,off,bufLen);
        compressor.finish();
        while (!compressor.finished()) {
          compressor.compress(block,0,block.length);
          out.write(block);
        }
        compressor.reset();
        off+=bufLen;
        len-=bufLen;
      } while (len > 0);
    }
    assertTrue("testSnappyBlockCompression error !!!",out.toByteArray().length > 0);
  } catch ( Exception ex) {
    fail("testSnappyBlockCompression ex error !!!");
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Compressing an empty stream emits only the 4-byte block header, and
 * decompressing that header immediately reports end-of-stream.
 */
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  BlockDecompressorStream decompressorStream = null;
  try {
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream compressorStream =
        new BlockCompressorStream(bytesOut, new SnappyCompressor(), 1024, 0);
    compressorStream.close();
    byte[] compressed = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, compressed.length);
    bytesIn = new ByteArrayInputStream(compressed);
    decompressorStream = new BlockDecompressorStream(bytesIn, new SnappyDecompressor(), 1024);
    assertEquals("return value is not -1", -1, decompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
  } finally {
    if (decompressorStream != null) {
      try {
        bytesIn.close();
        bytesOut.close();
        decompressorStream.close();
      } catch (IOException e) {
      }
    }
  }
}

Class: org.apache.hadoop.io.compress.zlib.TestZlibCompressorDecompressor

TestInitializer AssumptionSetter HybridVerifier 
// Skip the whole test class unless the native zlib library is loaded;
// the compressor/decompressor under test depends on it.
@Before
public void before(){
  assumeTrue(ZlibFactory.isNativeZlibLoaded(new Configuration()));
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises BuiltInGzipDecompressor error handling: invalid setInput
 * arguments must fail with the documented unchecked exceptions, and
 * several corrupted gzip headers must be rejected with an IOException
 * (never any other exception type).
 */
@Test
public void testBuiltInGzipDecompressorExceptions() {
  BuiltInGzipDecompressor decompresser = new BuiltInGzipDecompressor();
  // A null input buffer is rejected with NullPointerException.
  try {
    decompresser.setInput(null, 0, 1);
  } catch (NullPointerException ex) {
    // expected
  } catch (Exception ex) {
    fail("testBuiltInGzipDecompressorExceptions npe error " + ex);
  }
  // A negative length is rejected with ArrayIndexOutOfBoundsException.
  try {
    decompresser.setInput(new byte[]{0}, 0, -1);
  } catch (ArrayIndexOutOfBoundsException ex) {
    // expected
  } catch (Exception ex) {
    fail("testBuiltInGzipDecompressorExceptions aioob error" + ex);
  }
  // Rejected input must not have been consumed.
  assertTrue("decompresser.getBytesRead error",
      decompresser.getBytesRead() == 0);
  assertTrue("decompresser.getRemaining error",
      decompresser.getRemaining() == 0);
  decompresser.reset();
  decompresser.end();
  // Fix: the original repeated this probe four times inline; the
  // corrupted-header cases now share one helper.
  checkCorruptedGzipStream(new byte[]{0, 0, 1, 1, 1, 1, 11, 1, 1, 1, 1},
      "invalid 0 and 1 byte in gzip stream");
  checkCorruptedGzipStream(
      new byte[]{31, -117, 7, 1, 1, 1, 1, 11, 1, 1, 1, 1},
      "invalid 2 byte in gzip stream");
  checkCorruptedGzipStream(
      new byte[]{31, -117, 8, -32, 1, 1, 1, 11, 1, 1, 1, 1},
      "invalid 3 byte in gzip stream");
  checkCorruptedGzipStream(
      new byte[]{31, -117, 8, 4, 1, 1, 1, 11, 1, 1, 1, 1},
      "invalid 3 byte make hasExtraField");
}

/**
 * Feeds the first 11 bytes of {@code data} to a fresh
 * BuiltInGzipDecompressor via a DecompressorStream. An IOException is
 * the expected rejection; any other exception fails the test with
 * {@code failMessage}.
 */
private void checkCorruptedGzipStream(byte[] data, String failMessage) {
  try {
    byte[] buffer = new byte[1024];
    Decompressor decompressor = new BuiltInGzipDecompressor();
    DataInputBuffer gzbuf = new DataInputBuffer();
    InputStream decompStream = new DecompressorStream(gzbuf, decompressor);
    gzbuf.reset(data, 11);
    decompStream.read(buffer);
  } catch (IOException ioex) {
    // expected for corrupted gzip input
  } catch (Exception ex) {
    fail(failMessage + ex);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips 64 KB of generated data through ZlibCompressor and
 * ZlibDecompressor, checking byte accounting before and after
 * compression and that the decompressed output matches the input.
 */
@Test
public void testZlibCompressDecompress() {
  int rawDataSize = 1024 * 64;
  byte[] rawData = generate(rawDataSize);
  try {
    ZlibCompressor compressor = new ZlibCompressor();
    ZlibDecompressor decompressor = new ZlibDecompressor();
    assertFalse("testZlibCompressDecompress finished error",
        compressor.finished());
    compressor.setInput(rawData, 0, rawData.length);
    // setInput alone must not consume any bytes.
    assertEquals("testZlibCompressDecompress getBytesRead before error",
        0, compressor.getBytesRead());
    compressor.finish();
    byte[] compressedResult = new byte[rawDataSize];
    int cSize = compressor.compress(compressedResult, 0, rawDataSize);
    // Fix: message typo "ather" -> "after"; assertEquals gives a clearer
    // failure than assertTrue(x == y).
    assertEquals("testZlibCompressDecompress getBytesRead after error",
        rawDataSize, compressor.getBytesRead());
    // Generated data must actually shrink when compressed.
    // Fix: message typo "no less then" -> "no less than".
    assertTrue(
        "testZlibCompressDecompress compressed size no less than original size",
        cSize < rawDataSize);
    decompressor.setInput(compressedResult, 0, cSize);
    byte[] decompressedBytes = new byte[rawDataSize];
    decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
    assertArrayEquals("testZlibCompressDecompress arrays not equals ",
        rawData, decompressedBytes);
    compressor.reset();
    decompressor.reset();
  } catch (IOException ex) {
    fail("testZlibCompressDecompress ex !!!" + ex);
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Runs several zlib compress/decompress round trips with a configuration
 * that requests native libs, then reinitializes the compressor; fails if
 * native zlib was requested but is not actually loaded.
 */
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    // Reached only when native zlib is unavailable; this assert records
    // the mismatch between the request and reality.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
    return;
  }
  int tryNumber = 5;
  int BYTE_SIZE = 10 * 1024;
  Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  byte[] rawData = generate(BYTE_SIZE);
  try {
    for (int attempt = 0; attempt < tryNumber; attempt++) {
      compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
          (ZlibDecompressor) zlibDecompressor);
    }
    // Reinitializing with the same configuration must not throw.
    zlibCompressor.reinit(conf);
  } catch (Exception ex) {
    fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
  }
}

UtilityVerifier AssumptionSetter HybridVerifier 
@Test public void testZlibDirectCompressDecompress(){ int[] size={1,4,16,4 * 1024,64 * 1024,128 * 1024,1024 * 1024}; assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); try { for (int i=0; i < size.length; i++) { compressDecompressLoop(size[i]); } } catch ( IOException ex) { fail("testZlibDirectCompressDecompress ex !!!" + ex); } }

Class: org.apache.hadoop.io.file.tfile.TestTFileByteArrays

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Writes a TFile containing one meta block, then verifies that reading
 * the existing meta block succeeds while requesting a non-existent one
 * fails.
 */
@Test
public void testFailureGetNonExistentMetaBlock() throws IOException {
  if (skip) {
    return;
  }
  writer.append("keyX".getBytes(), "valueX".getBytes());
  DataOutputStream outMeta =
      writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
  outMeta.write(123);
  outMeta.write("foo".getBytes());
  outMeta.close();
  closeOutput();
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  // The meta block written above must be retrievable.
  DataInputStream mb = reader.getMetaBlock("testX");
  Assert.assertNotNull(mb);
  mb.close();
  // A meta block that was never written must be rejected.
  try {
    reader.getMetaBlock("testY");
    Assert.fail("Error on handling non-existent metablocks.");
  } catch (Exception e) {
    // expected: "testY" does not exist
  }
  reader.close();
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a scanner entry's value can be read once, but a second
 * getValue on the same entry is rejected.
 */
@Test
public void testFailureReadValueManyTimes() throws IOException {
  if (skip) {
    return;
  }
  writeRecords(5);
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  byte[] vbuf = new byte[BUF_SIZE];
  int vlen = scanner.entry().getValueLength();
  scanner.entry().getValue(vbuf);
  // Fix: JUnit's convention is assertEquals(expected, actual); the
  // original had the arguments reversed, garbling the failure message.
  Assert.assertEquals(VALUE + 0, new String(vbuf, 0, vlen));
  try {
    scanner.entry().getValue(vbuf);
    // Fix: typo "mlutiple" -> "multiple".
    Assert.fail("Cannot get the value multiple times.");
  } catch (Exception e) {
    // expected: a value may only be fetched once per entry
  }
  scanner.close();
  reader.close();
}

Class: org.apache.hadoop.io.nativeio.TestNativeIO

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test basic chmod operation (POSIX only): chmod on a missing path must
 * fail with ENOENT, and a sequence of modes applied to a directory must
 * each take effect.
 */
@Test(timeout = 30000)
public void testChmod() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  try {
    NativeIO.POSIX.chmod("/this/file/doesnt/exist", 777);
    fail("Chmod of non-existent file didn't fail");
  } catch (NativeIOException nioe) {
    assertEquals(Errno.ENOENT, nioe.getErrno());
  }
  File toChmod = new File(TEST_DIR, "testChmod");
  assertTrue("Create test subject", toChmod.exists() || toChmod.mkdir());
  // Cycle through permissive, fully closed, and typical modes.
  int[] modes = {0777, 0000, 0644};
  for (int mode : modes) {
    NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), mode);
    assertPermissions(toChmod, mode);
  }
}

UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Opens a file with O_CREAT (POSIX only), writes through the returned
 * descriptor, then verifies that an exclusive re-create (O_EXCL) of the
 * same file fails with EEXIST.
 */
@Test(timeout = 30000)
public void testOpenWithCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Test creating a file with O_CREAT");
  FileDescriptor fd = NativeIO.POSIX.open(
      new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
      NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
  // Fix: the original asserted assertNotNull(true), which can never
  // fail; the intent is to check the descriptor returned by open().
  assertNotNull(fd);
  assertTrue(fd.valid());
  FileOutputStream fos = new FileOutputStream(fd);
  fos.write("foo".getBytes());
  fos.close();
  // Closing the stream invalidates the shared descriptor.
  assertFalse(fd.valid());
  LOG.info("Test exclusive create");
  try {
    fd = NativeIO.POSIX.open(
        new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
        NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT
            | NativeIO.POSIX.O_EXCL, 0700);
    fail("Was able to create existing file with O_EXCL");
  } catch (NativeIOException nioe) {
    LOG.info("Got expected exception for failed exclusive create", nioe);
    assertEquals(Errno.EEXIST, nioe.getErrno());
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a
 * non-threadsafe implementation of getpwuid_r.
 */
@Test(timeout = 30000)
public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  final FileOutputStream fos =
      new FileOutputStream(new File(TEST_DIR, "testfstat"));
  // Fix: parameterize the raw ArrayList/AtomicReference the original used.
  final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
  List<Thread> statters = new ArrayList<Thread>();
  for (int i = 0; i < 10; i++) {
    Thread statter = new Thread() {
      @Override
      public void run() {
        // Hammer getFstat on the shared descriptor for ~5 seconds.
        long et = Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"), stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file",
                NativeIO.POSIX.Stat.S_IFREG,
                stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          } catch (Throwable t) {
            // Remember the failure; it is rethrown on the main thread.
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for (Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}

APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Writes a small patterned file, maps and mlocks it, and verifies the
 * mapping by comparing byte sums before unmapping.
 */
@Test(timeout = 10000)
public void testMlock() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final File lockFile = new File(
      new File(System.getProperty("test.build.data", "build/test/data")),
      "testMlockFile");
  final int DATA_LEN = 12289;
  byte data[] = new byte[DATA_LEN];
  int expectedSum = 0;
  // Fill with a repeating pattern and record the expected byte sum.
  for (int i = 0; i < data.length; i++) {
    data[i] = (byte) (i % 60);
    expectedSum += data[i];
  }
  FileOutputStream out = new FileOutputStream(lockFile);
  try {
    out.write(data);
    out.getChannel().force(true);
  } finally {
    out.close();
  }
  FileInputStream in = null;
  FileChannel channel = null;
  try {
    in = new FileInputStream(lockFile);
    channel = in.getChannel();
    long fileSize = channel.size();
    MappedByteBuffer mapped = channel.map(MapMode.READ_ONLY, 0, fileSize);
    // Lock the pages, then verify the mapping matches what was written.
    NativeIO.POSIX.mlock(mapped, fileSize);
    int actualSum = 0;
    for (int i = 0; i < fileSize; i++) {
      actualSum += mapped.get(i);
    }
    assertEquals("Expected sums to be equal", expectedSum, actualSum);
    NativeIO.POSIX.munmap(mapped);
  } finally {
    if (channel != null) {
      channel.close();
    }
    if (in != null) {
      in.close();
    }
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Windows-only: writes 100 'a' characters followed by 100 'b'
 * characters, positions the file pointer at offset 120 via NativeIO, and
 * checks that a 'b' is read from there.
 */
@Test(timeout = 30000)
public void testSetFilePointer() throws Exception {
  if (!Path.WINDOWS) {
    return;
  }
  LOG.info("Set a file pointer on Windows");
  try {
    File testfile = new File(TEST_DIR, "testSetFilePointer");
    assertTrue("Create test subject",
        testfile.exists() || testfile.createNewFile());
    FileWriter writer = new FileWriter(testfile);
    try {
      // First 100 characters are 'a', the next 100 are 'b'.
      for (int i = 0; i < 100; i++) {
        writer.write('a');
      }
      for (int i = 0; i < 100; i++) {
        writer.write('b');
      }
      writer.flush();
    } catch (Exception writerException) {
      fail("Got unexpected exception: " + writerException.getMessage());
    } finally {
      writer.close();
    }
    FileDescriptor fd = NativeIO.Windows.createFile(
        testfile.getCanonicalPath(),
        NativeIO.Windows.GENERIC_READ,
        NativeIO.Windows.FILE_SHARE_READ
            | NativeIO.Windows.FILE_SHARE_WRITE
            | NativeIO.Windows.FILE_SHARE_DELETE,
        NativeIO.Windows.OPEN_EXISTING);
    // Offset 120 lands inside the run of 'b' characters.
    NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
    FileReader reader = new FileReader(fd);
    try {
      int c = reader.read();
      assertTrue("Unexpected character: " + c, c == 'b');
    } catch (Exception readerException) {
      fail("Got unexpected exception: " + readerException.getMessage());
    } finally {
      reader.close();
    }
  } catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testRenameTo() throws Exception { final File TEST_DIR=new File(new File(System.getProperty("test.build.data","build/test/data")),"renameTest"); assumeTrue(TEST_DIR.mkdirs()); File nonExistentFile=new File(TEST_DIR,"nonexistent"); File targetFile=new File(TEST_DIR,"target"); try { NativeIO.renameTo(nonExistentFile,targetFile); Assert.fail(); } catch ( NativeIOException e) { if (Path.WINDOWS) { Assert.assertEquals(String.format("The system cannot find the file specified.%n"),e.getMessage()); } else { Assert.assertEquals(Errno.ENOENT,e.getErrno()); } } File sourceFile=new File(TEST_DIR,"source"); Assert.assertTrue(sourceFile.createNewFile()); NativeIO.renameTo(sourceFile,sourceFile); NativeIO.renameTo(sourceFile,targetFile); sourceFile=new File(TEST_DIR,"source"); Assert.assertTrue(sourceFile.createNewFile()); File badTarget=new File(targetFile,"subdir"); try { NativeIO.renameTo(sourceFile,badTarget); Assert.fail(); } catch ( NativeIOException e) { if (Path.WINDOWS) { Assert.assertEquals(String.format("The parameter is incorrect.%n"),e.getMessage()); } else { Assert.assertEquals(Errno.ENOTDIR,e.getErrno()); } } FileUtils.deleteQuietly(TEST_DIR); }

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Verifies sync_file_range on a live descriptor (the test is skipped via
 * assumeTrue(false) when the kernel lacks support) and that invoking it
 * on a closed descriptor yields EBADF.
 */
@Test(timeout=30000) public void testSyncFileRange() throws Exception {
  FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testSyncFileRange"));
  try {
    fos.write("foo".getBytes());
    NativeIO.POSIX.sync_file_range(fos.getFD(),0,1024,NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
  } catch ( UnsupportedOperationException uoe) {
    // sync_file_range not supported on this platform: skip the test.
    assumeTrue(false);
  } finally {
    fos.close();
  }
  // fos is closed now, so the same call must fail with EBADF.
  try {
    NativeIO.POSIX.sync_file_range(fos.getFD(),0,1024,NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
    fail("Did not throw on bad file");
  } catch ( NativeIOException nioe) {
    assertEquals(Errno.EBADF,nioe.getErrno());
  }
}

TestInitializer AssumptionSetter HybridVerifier 
@Before public void checkLoaded(){ assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); }

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * POSIX-only posix_fadvise checks: advising on /dev/zero succeeds (or
 * the test is skipped when unsupported), a closed descriptor yields
 * EBADF, and a null descriptor yields NullPointerException.
 */
@Test(timeout = 30000)
public void testPosixFadvise() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  FileInputStream zeroStream = new FileInputStream("/dev/zero");
  try {
    NativeIO.POSIX.posix_fadvise(zeroStream.getFD(), 0, 0,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
  } catch (UnsupportedOperationException uoe) {
    // fadvise not available on this platform: skip the test.
    assumeTrue(false);
  } catch (NativeIOException nioe) {
    // ignored, as by the original: the advise call may be rejected here
  } finally {
    zeroStream.close();
  }
  // Deliberately reuse the now-closed descriptor: must fail with EBADF.
  try {
    NativeIO.POSIX.posix_fadvise(zeroStream.getFD(), 0, 1024,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
    fail("Did not throw on bad file");
  } catch (NativeIOException nioe) {
    assertEquals(Errno.EBADF, nioe.getErrno());
  }
  // A null descriptor must be rejected outright.
  try {
    NativeIO.POSIX.posix_fadvise(null, 0, 1024,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
    fail("Did not throw on null file");
  } catch (NullPointerException npe) {
    // expected
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Windows-only: opens a file through NativeIO with all share modes
 * (including FILE_SHARE_DELETE) and verifies it can still be read after
 * being renamed while the descriptor is open.
 */
@Test(timeout=30000) public void testCreateFile() throws Exception {
  if (!Path.WINDOWS) {
    return;
  }
  LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
  try {
    File testfile=new File(TEST_DIR,"testCreateFile");
    assertTrue("Create test subject",testfile.exists() || testfile.createNewFile());
    // All share modes are requested so a concurrent rename is permitted.
    FileDescriptor fd=NativeIO.Windows.createFile(testfile.getCanonicalPath(),NativeIO.Windows.GENERIC_READ,NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE | NativeIO.Windows.FILE_SHARE_DELETE,NativeIO.Windows.OPEN_EXISTING);
    FileInputStream fin=new FileInputStream(fd);
    try {
      fin.read();
      // Rename the file while the descriptor is still open...
      File newfile=new File(TEST_DIR,"testRenamedFile");
      boolean renamed=testfile.renameTo(newfile);
      assertTrue("Rename failed.",renamed);
      // ...and confirm reads continue to work afterwards.
      fin.read();
    } catch ( Exception e) {
      fail("Got unexpected exception: " + e.getMessage());
    } finally {
      fin.close();
    }
  } catch ( Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}

IterativeVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test that opens and closes a file 10000 times - this would crash with
 * "Too many open files" if we leaked fds using this access pattern.
 */
@Test(timeout = 30000)
public void testFDDoesntLeak() throws IOException {
  if (Path.WINDOWS) {
    return;
  }
  for (int i = 0; i < 10000; i++) {
    FileDescriptor fd = NativeIO.POSIX.open(
        new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(),
        NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
    // Fix: the original asserted assertNotNull(true), a tautology; check
    // the descriptor actually returned by open().
    assertNotNull(fd);
    assertTrue(fd.valid());
    FileOutputStream fos = new FileOutputStream(fd);
    fos.write("foo".getBytes());
    fos.close();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies getFstat on an open stream: the owner matches the current
 * user (or the Administrators group on Windows), a group is present, and
 * the mode field marks a regular file.
 */
@Test(timeout=30000) public void testFstat() throws Exception {
  FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
  NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));
  String owner=stat.getOwner();
  String expectedOwner=System.getProperty("user.name");
  if (Path.WINDOWS) {
    // On Windows, files created by a member of Administrators may be
    // owned by the Administrators group rather than the individual user.
    UserGroupInformation ugi=UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString="Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner=adminsGroupString;
    }
  }
  assertEquals(expectedOwner,owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}

UtilityVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testOpenMissingWithoutCreate() throws Exception { if (Path.WINDOWS) { return; } LOG.info("Open a missing file without O_CREAT and it should fail"); try { FileDescriptor fd=NativeIO.POSIX.open(new File(TEST_DIR,"doesntexist").getAbsolutePath(),NativeIO.POSIX.O_WRONLY,0700); fail("Able to open a new file without O_CREAT"); } catch ( NativeIOException nioe) { LOG.info("Got expected exception",nioe); assertEquals(Errno.ENOENT,nioe.getErrno()); } }

Class: org.apache.hadoop.io.nativeio.TestSharedFileDescriptorFactory

APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Creating a SharedFileDescriptorFactory must remove leftover files from
 * a previous run that share its prefix.
 */
@Test(timeout = 10000)
public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  // Plant two stale files matching the "woot2_" prefix.
  String[] remainders = {
      path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder1",
      path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder2"};
  for (String remainder : remainders) {
    createTempFile(remainder);
  }
  SharedFileDescriptorFactory.create("woot2_",
      new String[]{path.getAbsolutePath()});
  // Factory creation must have cleaned up both stale files.
  for (String remainder : remainders) {
    Assert.assertFalse(new File(remainder).exists());
  }
  FileUtil.fullyDelete(path);
}

TestInitializer AssumptionSetter HybridVerifier 
@Before public void setup() throws Exception { Assume.assumeTrue(null == SharedFileDescriptorFactory.getLoadingFailureReason()); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * SharedFileDescriptorFactory.create must fail when every candidate
 * directory is unusable, and fall back to the first usable directory
 * when one is appended.
 */
@Test(timeout = 60000)
public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath = new File(TEST_BASE, "nonexistent");
  File permissionDeniedPath = new File("/");
  File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
  goodPath.mkdirs();
  String[] badDirsOnly = {
      nonExistentPath.getAbsolutePath(),
      permissionDeniedPath.getAbsolutePath()};
  try {
    SharedFileDescriptorFactory.create("shm_", badDirsOnly);
    Assert.fail();
  } catch (IOException e) {
    // expected: no usable directory in the list
  }
  String[] withGoodDir = {
      nonExistentPath.getAbsolutePath(),
      permissionDeniedPath.getAbsolutePath(),
      goodPath.getAbsolutePath()};
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("shm_", withGoodDir);
  Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
  FileUtil.fullyDelete(goodPath);
}

Class: org.apache.hadoop.io.retry.TestFailoverProxy

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A StandbyException triggers failover to the second proxy, while an
 * UnreliableException does not.
 */
@Test
public void testFailoverOnStandbyException()
    throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class, newFlipFlopProxyProvider(),
      RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1", proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded twice");
  } catch (UnreliableException e) {
    // UnreliableException does not cause failover: still on impl1.
    assertEquals("impl1", e.getMessage());
  }
  // With a StandbyException from impl1, the call fails over to impl2.
  proxy = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class,
      newFlipFlopProxyProvider(
          TypeOfExceptionToFailWith.STANDBY_EXCEPTION,
          TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
      RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1", proxy.succeedsOnceThenFailsReturningString());
  assertEquals("impl2", proxy.succeedsOnceThenFailsReturningString());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Ensure that normal IO exceptions don't result in a failover.
 */
@Test
public void testExpectedIOException() {
  UnreliableInterface proxy = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class,
      newFlipFlopProxyProvider(
          TypeOfExceptionToFailWith.REMOTE_EXCEPTION,
          TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000));
  try {
    proxy.failsIfIdentifierDoesntMatch("no-such-identifier");
    fail("Should have thrown *some* exception");
  } catch (Exception e) {
    // A plain IOException should surface as-is, without failover.
    assertTrue("Expected IOE but got " + e.getClass(),
        e instanceof IOException);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A fail-over-once policy allows one success per proxy, then fails for
 * good on the next error.
 */
@Test
public void testSuccedsOnceThenFailOver()
    throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class, newFlipFlopProxyProvider(),
      new FailOverOnceOnAnyExceptionPolicy());
  assertEquals("impl1", proxy.succeedsOnceThenFailsReturningString());
  assertEquals("impl2", proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded more than twice");
  } catch (UnreliableException e) {
    // expected: both proxies have already succeeded once each
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Network (IO) exceptions fail over only for idempotent operations.
 */
@Test
public void testFailoverOnNetworkExceptionIdempotentOperation()
    throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class,
      newFlipFlopProxyProvider(
          TypeOfExceptionToFailWith.IO_EXCEPTION,
          TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
      RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1", proxy.succeedsOnceThenFailsReturningString());
  // Non-idempotent call: the IOException is not retried on impl2.
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded twice");
  } catch (IOException e) {
    assertEquals("impl1", e.getMessage());
  }
  // Idempotent calls may fail over across proxies.
  assertEquals("impl1",
      proxy.succeedsOnceThenFailsReturningStringIdempotent());
  assertEquals("impl2",
      proxy.succeedsOnceThenFailsReturningStringIdempotent());
}

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testNeverFailOver() throws UnreliableException, IOException, StandbyException { UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),RetryPolicies.TRY_ONCE_THEN_FAIL); unreliable.succeedsOnceThenFailsReturningString(); try { unreliable.succeedsOnceThenFailsReturningString(); fail("should not have succeeded twice"); } catch ( UnreliableException e) { assertEquals("impl1",e.getMessage()); } }

Class: org.apache.hadoop.io.retry.TestRetryProxy

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}.
 */
@Test
public void testRpcInvocation() throws Exception {
  final UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          RETRY_FOREVER);
  assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
  // A ProtocolTranslator wrapping an RPC proxy also counts as an RPC
  // invocation; the counter proves getUnderlyingProxyObject was called.
  ProtocolTranslator xlator = new ProtocolTranslator() {
    int count = 0;
    @Override
    public Object getUnderlyingProxyObject() {
      count++;
      return unreliable;
    }
    @Override
    public String toString() {
      return "" + count;
    }
  };
  assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
  // Fix: JUnit's convention is assertEquals(expected, actual); the
  // original had the arguments reversed.
  assertEquals("1", xlator.toString());
  assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Interrupting a thread blocked in a retry sleep must surface an
 * InterruptedException promptly instead of continuing to retry.
 */
@Test
public void testRetryInterruptible() throws Throwable {
  final UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryUpToMaximumTimeWithFixedSleep(10, 10, TimeUnit.SECONDS));
  final CountDownLatch latch = new CountDownLatch(1);
  // Fix: parameterize the raw AtomicReference/Future/Callable the
  // original used.
  final AtomicReference<Thread> futureThread = new AtomicReference<Thread>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  Future<Throwable> future = exec.submit(new Callable<Throwable>() {
    @Override
    public Throwable call() throws Exception {
      futureThread.set(Thread.currentThread());
      latch.countDown();
      try {
        unreliable.alwaysFailsWithFatalException();
      } catch (UndeclaredThrowableException ute) {
        return ute.getCause();
      }
      return null;
    }
  });
  latch.await();
  // Give the callable time to enter its retry sleep before interrupting.
  Thread.sleep(1000);
  assertTrue(futureThread.get().isAlive());
  futureThread.get().interrupt();
  Throwable e = future.get(1, TimeUnit.SECONDS);
  assertNotNull(e);
  assertEquals(InterruptedException.class, e.getClass());
  assertEquals("sleep interrupted", e.getMessage());
}

Class: org.apache.hadoop.io.serializer.TestWritableSerialization

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A configured Writable wrapped in a GenericWritable must survive a
 * serialization round trip with a Configuration re-attached afterwards.
 */
@Test public void testWritableConfigurable() throws Exception {
  conf.set(CONF_TEST_KEY,CONF_TEST_VALUE);
  FooGenericWritable generic=new FooGenericWritable();
  generic.setConf(conf);
  Baz baz=new Baz();
  generic.set(baz);
  Baz result=SerializationTestUtil.testSerialization(conf,baz);
  assertEquals(baz,result);
  // The deserialized copy must have been given a Configuration.
  assertNotNull(result.getConf());
}

Class: org.apache.hadoop.ipc.TestIPC

APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Calling an address with no server must fail, and the thrown
 * IOException's message must name both the remote address and the
 * underlying cause.
 */
@Test(timeout=60000) public void testStandAloneClient() throws IOException {
  Client client=new Client(LongWritable.class,conf);
  // Port 10 is expected to have no listener.
  InetSocketAddress address=new InetSocketAddress("127.0.0.1",10);
  try {
    client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
    fail("Expected an exception to have been thrown");
  } catch ( IOException e) {
    String message=e.getMessage();
    String addressText=address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in "+ message,message.contains(addressText));
    Throwable cause=e.getCause();
    assertNotNull("No nested exception in " + e,cause);
    String causeText=cause.getMessage();
    // The wrapping exception's message must embed the cause's message.
    assertTrue("Did not find " + causeText + " in "+ message,message.contains(causeText));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
  final int retryCount=255;
  final Client client=new Client(LongWritable.class,conf);
  // Publish call id and retry count on the client side before calling.
  Client.setCallIdAndRetryCount(Client.nextCallId(),255);
  final TestServer server=new TestServer(1,false);
  // NOTE(review): this assertion runs on the server handler thread; it
  // only fails the test if TestServer surfaces the error through the
  // call, which is then caught via caller.failed below — confirm.
  server.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(retryCount,Server.getCallRetryCount());
    }
  }
  ;
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller=new SerialCaller(client,addr,10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}

BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Check that file descriptors aren't leaked by starting
 * and stopping IPC servers.
 */
@Test(timeout = 60000)
public void testSocketLeak() throws IOException {
  Assume.assumeTrue(FD_DIR.exists());
  long startFds = countOpenFileDescriptors();
  // Churn 50 servers; each start/stop cycle must release its sockets.
  for (int cycle = 0; cycle < 50; cycle++) {
    Server server = new TestServer(1, true);
    server.start();
    server.stop();
  }
  long endFds = countOpenFileDescriptors();
  long leaked = endFds - startFds;
  assertTrue("Leaked " + leaked + " file descriptors", leaked < 20);
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests idle-connection cleanup on the server: once client connections go
 * idle, the scanner closes at most killMax connections per sweep, never
 * closes a connection with a call still in progress, and eventually reaches
 * zero open connections.
 */
@Test(timeout = 30000)
public void testConnectionIdleTimeouts() throws Exception {
    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.DEBUG);
    final int maxIdle = 1000;
    final int cleanupInterval = maxIdle * 3 / 4; // stagger scans vs. maxIdle
    final int killMax = 3;
    final int clients = 1 + killMax * 2; // 1 blocked call + 2 full kill batches

    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, maxIdle);
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY, 0);
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY, killMax);
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
        cleanupInterval);

    final CyclicBarrier firstCallBarrier = new CyclicBarrier(2);
    final CyclicBarrier callBarrier = new CyclicBarrier(clients);
    final CountDownLatch allCallLatch = new CountDownLatch(clients);
    final AtomicBoolean error = new AtomicBoolean();

    final TestServer server = new TestServer(clients, false);
    Thread[] threads = new Thread[clients];
    try {
        server.callListener = new Runnable() {
            AtomicBoolean first = new AtomicBoolean(true);
            @Override
            public void run() {
                try {
                    allCallLatch.countDown();
                    if (first.compareAndSet(true, false)) {
                        // Exactly one call stays blocked to pin its connection.
                        firstCallBarrier.await();
                    } else {
                        callBarrier.await();
                    }
                } catch (Throwable t) {
                    LOG.error(t);
                    error.set(true);
                }
            }
        };
        server.start();

        final CountDownLatch callReturned = new CountDownLatch(clients - 1);
        final InetSocketAddress addr = NetUtils.getConnectAddress(server);
        final Configuration clientConf = new Configuration();
        clientConf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 10000);
        for (int i = 0; i < clients; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    Client client = new Client(LongWritable.class, clientConf);
                    try {
                        client.call(new LongWritable(Thread.currentThread().getId()),
                            addr, null, null, 0, clientConf);
                        callReturned.countDown();
                        Thread.sleep(10000);
                    } catch (IOException e) {
                        LOG.error(e);
                    } catch (InterruptedException e) {
                        // shutdown is signalled via interrupt in the finally block
                    }
                }
            });
            threads[i].start();
        }

        // All clients have issued their call and hold an open connection.
        allCallLatch.await();
        assertFalse(error.get());
        assertEquals(clients, server.getNumOpenConnections());

        // Let all but the first call return; connections are idle but within
        // maxIdle, so none may be closed yet.
        callBarrier.await();
        callReturned.await();
        assertEquals(clients, server.getNumOpenConnections());

        // The scanner closes at most killMax connections per sweep, and must
        // never drop below 1 while the first call is still blocked.
        Thread.sleep(maxIdle * 2 - cleanupInterval);
        for (int i = clients; i > 1; i -= killMax) {
            Thread.sleep(cleanupInterval);
            assertFalse(error.get());
            assertEquals(i, server.getNumOpenConnections());
        }
        Thread.sleep(cleanupInterval);
        assertFalse(error.get());
        assertEquals(1, server.getNumOpenConnections());

        // Release the blocked call; its connection should idle out too.
        firstCallBarrier.await();
        Thread.sleep(maxIdle * 2);
        assertFalse(error.get());
        assertEquals(0, server.getNumOpenConnections());
    } finally {
        for (Thread t : threads) {
            if (t != null) {
                t.interrupt();
                t.join();
            }
        }
        // FIX: server.stop() was inside the per-thread loop above, stopping
        // the server once per client thread; stop it exactly once, after all
        // client threads have been joined.
        server.stop();
    }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests that (1) the RPC server uses the call id/retry provided by the RPC
 * client, and (2) the client receives the same call id/retry back in the
 * server's response header.
 */
@Test(timeout = 60000)
public void testCallIdAndRetry() throws IOException {
    final CallInfo info = new CallInfo();

    // The client records the id/retry of every call it creates, and verifies
    // that the response header echoes the same values.
    final Client client = new Client(LongWritable.class, conf) {
        @Override
        Call createCall(RpcKind rpcKind, Writable rpcRequest) {
            final Call call = super.createCall(rpcKind, rpcRequest);
            info.id = call.id;
            info.retry = call.retry;
            return call;
        }

        @Override
        void checkResponse(RpcResponseHeaderProto header) throws IOException {
            super.checkResponse(header);
            Assert.assertEquals(info.id, header.getCallId());
            Assert.assertEquals(info.retry, header.getRetryCount());
        }
    };

    // The server-side handler must observe the same id/retry values.
    final TestServer server = new TestServer(1, false);
    server.callListener = new Runnable() {
        @Override
        public void run() {
            Assert.assertEquals(info.id, Server.getCallId());
            Assert.assertEquals(info.retry, Server.getCallRetryCount());
        }
    };

    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        final SerialCaller caller = new SerialCaller(client, addr, 10);
        caller.run();
        assertFalse(caller.failed);
    } finally {
        client.stop();
        server.stop();
    }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the RPC server sees the default retry count (0) when the client
 * has not set one explicitly.
 */
@Test(timeout = 60000)
public void testInitialCallRetryCount() throws IOException {
    final Client client = new Client(LongWritable.class, conf);
    final TestServer server = new TestServer(1, false);
    server.callListener = new Runnable() {
        @Override
        public void run() {
            // No retry count was set by the client, so the server sees 0.
            Assert.assertEquals(0, Server.getCallRetryCount());
        }
    };
    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        final SerialCaller caller = new SerialCaller(client, addr, 10);
        caller.run();
        assertFalse(caller.failed);
    } finally {
        client.stop();
        server.stop();
    }
}

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the client generates a unique, sequential call ID for each RPC
 * call, even when multiple caller threads share the same client.
 *
 * @throws InterruptedException if interrupted while joining caller threads
 */
@Test(timeout = 60000)
public void testUniqueSequentialCallIds() throws IOException, InterruptedException {
    int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
    TestServer server = new TestServer(serverThreads, false);
    // FIX: restore the parameterized type lost from the raw List/ArrayList;
    // the raw form does not compile against callIds.get(0).intValue() below.
    final List<Integer> callIds = Collections.synchronizedList(new ArrayList<Integer>());
    server.callListener = new Runnable() {
        @Override
        public void run() {
            callIds.add(Server.getCallId());
        }
    };

    Client client = new Client(LongWritable.class, conf);
    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        SerialCaller[] callers = new SerialCaller[callerCount];
        for (int i = 0; i < callerCount; ++i) {
            callers[i] = new SerialCaller(client, addr, perCallerCallCount);
            callers[i].start();
        }
        for (int i = 0; i < callerCount; ++i) {
            callers[i].join();
            assertFalse(callers[i].failed);
        }
    } finally {
        client.stop();
        server.stop();
    }

    // Every call must have been observed, and after sorting the ids must form
    // a contiguous ascending run starting at the smallest observed id.
    int expectedCallCount = callerCount * perCallerCallCount;
    assertEquals(expectedCallCount, callIds.size());
    Collections.sort(callIds);
    final int startID = callIds.get(0).intValue();
    for (int i = 0; i < expectedCallCount; ++i) {
        assertEquals(startID + i, callIds.get(i).intValue());
    }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Tests that an IOException thrown by the socket factory propagates to the
 * caller of {@link Client#call}.
 */
@Test(timeout = 60000)
public void testSocketFactoryException() throws IOException {
    SocketFactory mockFactory = mock(SocketFactory.class);
    doThrow(new IOException("Injected fault")).when(mockFactory).createSocket();
    Client client = new Client(LongWritable.class, conf, mockFactory);
    InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
    try {
        client.call(new LongWritable(RANDOM.nextLong()), address, null, null, 0, conf);
        fail("Expected an exception to have been thrown");
    } catch (IOException e) {
        assertTrue(e.getMessage().contains("Injected fault"));
    } finally {
        // FIX: the original never stopped the client, leaking its threads.
        client.stop();
    }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Regression test for HADOOP-7428: a RuntimeException thrown after a socket
 * is created but before the IPC connection is established must be handled
 * cleanly, and a subsequent call on the same client must succeed.
 */
@Test(timeout = 60000)
public void testRTEDuringConnectionSetup() throws IOException {
    // Spy factory whose sockets throw on setSoTimeout(), simulating a fault
    // during connection setup.
    SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
    Mockito.doAnswer(new Answer() {
        @Override
        public Socket answer(InvocationOnMock invocation) throws Throwable {
            Socket s = spy((Socket) invocation.callRealMethod());
            doThrow(new RuntimeException("Injected fault")).when(s).setSoTimeout(anyInt());
            return s;
        }
    }).when(spyFactory).createSocket();

    Server server = new TestServer(1, true);
    server.start();
    Client client = null;
    try {
        InetSocketAddress address = NetUtils.getConnectAddress(server);
        client = new Client(LongWritable.class, conf, spyFactory);
        try {
            client.call(new LongWritable(RANDOM.nextLong()), address, null, null, 0, conf);
            fail("Expected an exception to have been thrown");
        } catch (Exception e) {
            LOG.info("caught expected exception", e);
            assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
        }
        // After clearing the injected fault, the same client must reconnect.
        Mockito.reset(spyFactory);
        client.call(new LongWritable(RANDOM.nextLong()), address, null, null, 0, conf);
    } finally {
        // FIX: also stop the client to avoid leaking its threads.
        if (client != null) {
            client.stop();
        }
        server.stop();
    }
}

Class: org.apache.hadoop.ipc.TestIdentityProviders

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a custom IdentityProvider configured by class name is
 * instantiated from the configuration.
 */
@Test
public void testPluggableIdentityProvider() {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
        "org.apache.hadoop.ipc.UserIdentityProvider");
    // FIX: restore the parameterized type (the raw List would not compile
    // against the assignment from providers.get(0) below).
    List<IdentityProvider> providers = conf.getInstances(
        CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
        IdentityProvider.class);
    // FIX: assertEquals gives a better failure message than assertTrue(==).
    assertEquals(1, providers.size());
    IdentityProvider ip = providers.get(0);
    assertNotNull(ip);
    // FIX: JUnit's assertEquals takes the expected value first.
    assertEquals(UserIdentityProvider.class, ip.getClass());
}

Class: org.apache.hadoop.ipc.TestProtoBufRpc

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a short echo succeeds while an extra-long RPC message fails
 * with a ServiceException.
 */
@Test(timeout = 6000)
public void testExtraLongRpc() throws Exception {
    TestRpcService2 client = getClient2();

    // A short message echoes back unchanged.
    final String shortString = StringUtils.repeat("X", 4);
    EchoRequestProto echoRequest =
        EchoRequestProto.newBuilder().setMessage(shortString).build();
    EchoResponseProto echoResponse = client.echo2(null, echoRequest);
    Assert.assertEquals(shortString, echoResponse.getMessage());

    // An oversized message must be rejected.
    final String longString = StringUtils.repeat("X", 4096);
    echoRequest = EchoRequestProto.newBuilder().setMessage(longString).build();
    try {
        echoResponse = client.echo2(null, echoRequest);
        Assert.fail("expected extra-long RPC to fail");
    } catch (ServiceException se) {
        // expected
    }
}

Class: org.apache.hadoop.ipc.TestRPC

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that {@code Server.stop()} terminates all Reader threads.
 */
@Test
public void testStopsAllThreads() throws IOException, InterruptedException {
    int threadsBefore = countThreads("Server$Listener$Reader");
    assertEquals("Expect no Reader threads running before test", 0, threadsBefore);

    final Server server = new RPC.Builder(conf)
        .setProtocol(TestProtocol.class)
        .setInstance(new TestImpl())
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(5)
        .setVerbose(true)
        .build();
    server.start();
    try {
        // Poll (up to 5 seconds) until at least one Reader thread appears.
        int threadsRunning = 0;
        long totalSleepTime = 0;
        do {
            totalSleepTime += 10;
            Thread.sleep(10);
            threadsRunning = countThreads("Server$Listener$Reader");
        } while (threadsRunning == 0 && totalSleepTime < 5000);
        threadsRunning = countThreads("Server$Listener$Reader");
        assertTrue(threadsRunning > 0);
    } finally {
        server.stop();
    }

    int threadsAfter = countThreads("Server$Listener$Reader");
    assertEquals("Expect no Reader threads left running after test", 0, threadsAfter);
}

Class: org.apache.hadoop.ipc.TestRPCCompatibility

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test
public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
    TestImpl1 impl = new TestImpl1();
    server = new RPC.Builder(conf)
        .setProtocol(TestProtocol1.class)
        .setInstance(impl)
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(2)
        .setVerbose(false)
        .build();
    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
    server.start();

    ProtocolMetaInfoServerSideTranslatorPB xlator =
        new ProtocolMetaInfoServerSideTranslatorPB(server);

    // TestProtocol1 was not registered for RPC_PROTOCOL_BUFFER.
    GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(null,
        createGetProtocolSigRequestProto(TestProtocol1.class,
            RPC.RpcKind.RPC_PROTOCOL_BUFFER));
    Assert.assertEquals(0, resp.getProtocolSignatureCount());

    // ...but it is registered for RPC_WRITABLE, with the expected version.
    resp = xlator.getProtocolSignature(null,
        createGetProtocolSigRequestProto(TestProtocol1.class,
            RPC.RpcKind.RPC_WRITABLE));
    Assert.assertEquals(1, resp.getProtocolSignatureCount());
    ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
    Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());

    // The signature must contain the fingerprint of echo(String).
    boolean found = false;
    int expected = ProtocolSignature.getFingerprint(
        TestProtocol1.class.getMethod("echo", String.class));
    for (int m : sig.getMethodsList()) {
        if (expected == m) {
            found = true;
            break;
        }
    }
    Assert.assertTrue(found);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests ProtocolSignature fingerprints: overloads, aliases and different
 * parameter counts hash differently, while matching signatures hash the
 * same, and a method-set fingerprint is order-independent.
 */
@Test
public void testHashCode() throws Exception {
    Method strMethod = TestProtocol3.class.getMethod("echo", String.class);
    int stringEchoHash = ProtocolSignature.getFingerprint(strMethod);
    Method intMethod = TestProtocol3.class.getMethod("echo", int.class);
    int intEchoHash = ProtocolSignature.getFingerprint(intMethod);
    // Overloads with different parameter types must differ.
    assertFalse(stringEchoHash == intEchoHash);

    // echo(int) on TestProtocol2 matches TestProtocol3's fingerprint...
    int intEchoHash1 = ProtocolSignature.getFingerprint(
        TestProtocol2.class.getMethod("echo", int.class));
    assertEquals(intEchoHash, intEchoHash1);
    // ...while echo(String) on TestProtocol2 does not.
    int stringEchoHash1 = ProtocolSignature.getFingerprint(
        TestProtocol2.class.getMethod("echo", String.class));
    assertFalse(stringEchoHash == stringEchoHash1);

    // A renamed method must hash differently.
    int intEchoHashAlias = ProtocolSignature.getFingerprint(
        TestProtocol3.class.getMethod("echo_alias", int.class));
    assertFalse(intEchoHash == intEchoHashAlias);

    // A different parameter count must hash differently.
    int intEchoHash2 = ProtocolSignature.getFingerprint(
        TestProtocol3.class.getMethod("echo", int.class, int.class));
    assertFalse(intEchoHash == intEchoHash2);

    // Fingerprint of a set of methods is independent of their order.
    int hash1 = ProtocolSignature.getFingerprint(new Method[]{intMethod, strMethod});
    int hash2 = ProtocolSignature.getFingerprint(new Method[]{strMethod, intMethod});
    assertEquals(hash1, hash2);
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that calling a server with a newer protocol version than it supports
 * fails with a VersionMismatch remote exception carrying the matching RPC
 * error code.
 */
@Test
public void testVersionMismatch() throws IOException {
    server = new RPC.Builder(conf)
        .setProtocol(TestProtocol2.class)
        .setInstance(new TestImpl2())
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(2)
        .setVerbose(false)
        .build();
    server.start();
    addr = NetUtils.getConnectAddress(server);

    TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
        TestProtocol4.versionID, addr, conf);
    try {
        proxy.echo(21);
        fail("The call must throw VersionMismatch exception");
    } catch (RemoteException ex) {
        Assert.assertEquals(RPC.VersionMismatch.class.getName(), ex.getClassName());
        // FIX: assertEquals instead of assertTrue(equals()) so a failure
        // reports the actual error code.
        Assert.assertEquals(RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH,
            ex.getErrorCode());
    } catch (IOException ex) {
        fail("Expected version mismatch but got " + ex);
    }
}

Class: org.apache.hadoop.ipc.TestSaslRPC

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a secret-manager failure surfaces to the client as a
 * RemoteException with the expected message and an InvalidToken cause.
 */
@Test
public void testErrorMessage() throws Exception {
    BadTokenSecretManager sm = new BadTokenSecretManager();
    final Server server = new RPC.Builder(conf)
        .setProtocol(TestSaslProtocol.class)
        .setInstance(new TestSaslImpl())
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(sm)
        .build();

    boolean succeeded = false;
    try {
        doDigestRpc(server, sm);
    } catch (RemoteException e) {
        LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
        assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
        assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
        succeeded = true;
    }
    assertTrue(succeeded);
}

NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that PLAIN SASL negotiation with a wrong password fails with the
 * expected SaslException message.
 */
@Test
public void testSaslPlainServerBadPassword() {
    SaslException caught = null;
    try {
        runNegotiation(new TestPlainCallbacks.Client("user", "pass1"),
            new TestPlainCallbacks.Server("user", "pass2"));
    } catch (SaslException se) {
        caught = se;
    }
    assertNotNull(caught);
    assertEquals("PLAIN auth failed: wrong password", caught.getMessage());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that proxies sharing the same per-connection configuration share one
 * cached connection, while a different max-idle-time setting produces a
 * distinct connection with that idle time.
 */
@Test
public void testPerConnectionConf() throws Exception {
    TestTokenSecretManager sm = new TestTokenSecretManager();
    final Server server = new RPC.Builder(conf)
        .setProtocol(TestSaslProtocol.class)
        .setInstance(new TestSaslImpl())
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(sm)
        .build();
    server.start();

    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current.getUserName()));
    Token token = new Token(tokenId, sm);
    SecurityUtil.setTokenService(token, addr);
    current.addToken(token);

    Configuration newConf = new Configuration(conf);
    newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");

    Client client = null;
    TestSaslProtocol proxy1 = null;
    TestSaslProtocol proxy2 = null;
    TestSaslProtocol proxy3 = null;
    int timeouts[] = {111222, 3333333};
    try {
        newConf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
        proxy1 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID,
            addr, newConf);
        proxy1.getAuthMethod();
        client = WritableRpcEngine.getClient(newConf);
        Set conns = client.getConnectionIds();
        assertEquals("number of connections in cache is wrong", 1, conns.size());

        // A second proxy with the same conf reuses the cached connection.
        proxy2 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID,
            addr, newConf);
        proxy2.getAuthMethod();
        assertEquals("number of connections in cache is wrong", 1, conns.size());

        // A different idle timeout must create a new connection.
        newConf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
        proxy3 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID,
            addr, newConf);
        proxy3.getAuthMethod();
        assertEquals("number of connections in cache is wrong", 2, conns.size());

        ConnectionId[] connsArray = {
            RPC.getConnectionIdForProxy(proxy1),
            RPC.getConnectionIdForProxy(proxy2),
            RPC.getConnectionIdForProxy(proxy3)};
        assertEquals(connsArray[0], connsArray[1]);
        assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
        assertFalse(connsArray[0].equals(connsArray[2]));
        // FIX: assertNotSame on two autoboxed ints always passed (values above
        // the Integer cache always box to distinct objects); the intent is
        // that the third connection carries timeouts[1] as its idle time.
        assertEquals(connsArray[2].getMaxIdleTime(), timeouts[1]);
    } finally {
        server.stop();
        // Clean up connections after we're done
        if (client != null) {
            client.getConnectionIds().clear();
        }
        if (proxy1 != null) RPC.stopProxy(proxy1);
        if (proxy2 != null) RPC.stopProxy(proxy2);
        if (proxy3 != null) RPC.stopProxy(proxy3);
    }
}

Class: org.apache.hadoop.ipc.TestSocketFactory

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that two differently-configured SocketFactory instances behave as
 * distinct keys in a HashMap (i.e. their equals/hashCode distinguish them).
 */
@Test
public void testSocketFactoryAsKeyInMap() {
    // FIX: restore the parameterized Map type instead of the raw Map/HashMap.
    Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
    int toBeCached1 = 1;
    int toBeCached2 = 2;

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
        "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
    final SocketFactory dummySocketFactory = NetUtils.getDefaultSocketFactory(conf);
    dummyCache.put(dummySocketFactory, toBeCached1);

    conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
        "org.apache.hadoop.net.StandardSocketFactory");
    final SocketFactory defaultSocketFactory = NetUtils.getDefaultSocketFactory(conf);
    dummyCache.put(defaultSocketFactory, toBeCached2);

    Assert.assertEquals("The cache contains two elements", 2, dummyCache.size());
    // FIX: assertEquals(msg, expr, false) hid the intent and reversed
    // expected/actual; assertFalse states it directly.
    Assert.assertFalse("Equals of both socket factory shouldn't be same",
        defaultSocketFactory.equals(dummySocketFactory));

    // FIX: assertSame on autoboxed ints relied on the Integer cache
    // (values -128..127); compare by value instead.
    assertEquals(Integer.valueOf(toBeCached2), dummyCache.remove(defaultSocketFactory));
    dummyCache.put(defaultSocketFactory, toBeCached2);
    assertEquals(Integer.valueOf(toBeCached1), dummyCache.remove(dummySocketFactory));
}

Class: org.apache.hadoop.lib.lang.TestRunnableCallable

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests RunnableCallable wrapping a Callable: both run() and call() invoke
 * the delegate, and toString() reflects the delegate.
 */
@Test
public void callable() throws Exception {
    C delegate = new C();
    RunnableCallable wrapper = new RunnableCallable(delegate);
    wrapper.run();
    assertTrue(delegate.RUN);

    delegate = new C();
    wrapper = new RunnableCallable(delegate);
    wrapper.call();
    assertTrue(delegate.RUN);

    assertEquals(wrapper.toString(), "C");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests RunnableCallable wrapping a Runnable: both run() and call() invoke
 * the delegate, and toString() reflects the delegate.
 */
@Test
public void runnable() throws Exception {
    R delegate = new R();
    RunnableCallable wrapper = new RunnableCallable(delegate);
    wrapper.run();
    assertTrue(delegate.RUN);

    delegate = new R();
    wrapper = new RunnableCallable(delegate);
    wrapper.call();
    assertTrue(delegate.RUN);

    assertEquals(wrapper.toString(), "R");
}

Class: org.apache.hadoop.lib.lang.TestXException

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the XException constructors: raw error template, parameterized
 * message, cause propagation, and wrapping of another XException.
 */
@Test
public void testXException() throws Exception {
    // Template only: message is the unexpanded template, no cause.
    XException ex = new XException(TestERROR.TC);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: {0}");
    assertNull(ex.getCause());

    // A message argument is substituted into the template.
    ex = new XException(TestERROR.TC, "msg");
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: msg");
    assertNull(ex.getCause());

    // A Throwable argument supplies both message text and cause.
    Exception cause = new Exception();
    ex = new XException(TestERROR.TC, cause);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: " + cause.toString());
    assertEquals(ex.getCause(), cause);

    // Wrapping another XException preserves error, message and cause chain.
    XException xcause = ex;
    ex = new XException(xcause);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), xcause.getMessage());
    assertEquals(ex.getCause(), xcause);
}

Class: org.apache.hadoop.lib.server.TestBaseService

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests BaseService defaults and initialization against a mocked Server:
 * prefix handling, prefixed-config extraction, and the init callback.
 */
@Test
public void baseService() throws Exception {
    BaseService service = new MyService();
    assertNull(service.getInterface());
    assertEquals(service.getPrefix(), "myservice");
    assertEquals(service.getServiceDependencies().length, 0);

    Server server = Mockito.mock(Server.class);
    Configuration conf = new Configuration(false);
    conf.set("server.myservice.foo", "FOO");
    conf.set("server.myservice1.bar", "BAR"); // different prefix, must be ignored
    Mockito.when(server.getConfig()).thenReturn(conf);
    Mockito.when(server.getPrefixedName("myservice.foo"))
        .thenReturn("server.myservice.foo");
    Mockito.when(server.getPrefixedName("myservice."))
        .thenReturn("server.myservice.");

    service.init(server);
    assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
    // Only keys under the service's own prefix are exposed.
    assertEquals(service.getServiceConfig().size(), 1);
    assertEquals(service.getServiceConfig().get("foo"), "FOO");
    assertTrue(MyService.INIT);
}

Class: org.apache.hadoop.lib.server.TestServer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests server status transitions across the lifecycle:
 * UNDEF before init(), NORMAL after init(), SHUTDOWN after destroy().
 */
@Test
@TestDir
public void lifeCycle() throws Exception {
    Configuration conf = new Configuration(false);
    conf.set("server.services", LifeCycleService.class.getName());
    Server server = createServer(conf);

    assertEquals(server.getStatus(), Server.Status.UNDEF);
    server.init();
    assertNotNull(server.get(LifeCycleService.class));
    assertEquals(server.getStatus(), Server.Status.NORMAL);
    server.destroy();
    assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a service receives the full lifecycle callback sequence:
 * init, postInit, serverStatusChange, destroy.
 */
@Test
@TestDir
public void serviceLifeCycle() throws Exception {
    TestService.LIFECYCLE.clear();
    Configuration conf = new Configuration(false);
    conf.set("server.services", TestService.class.getName());
    Server server = createServer(conf);
    server.init();
    assertNotNull(server.get(TestService.class));
    server.destroy();
    assertEquals(TestService.LIFECYCLE,
        Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests every Server constructor variant and the corresponding getters:
 * explicit directories with and without a Configuration, and home-dir-derived
 * conf/log/temp directories with and without a Configuration.
 */
@Test
@TestDir
public void constructorsGetters() throws Exception {
    // Explicit directories + configuration.
    Server server = new Server("server", getAbsolutePath("/a"), getAbsolutePath("/b"),
        getAbsolutePath("/c"), getAbsolutePath("/d"), new Configuration(false));
    assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
    assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
    assertEquals(server.getLogDir(), getAbsolutePath("/c"));
    assertEquals(server.getTempDir(), getAbsolutePath("/d"));
    assertEquals(server.getName(), "server");
    assertEquals(server.getPrefix(), "server");
    assertEquals(server.getPrefixedName("name"), "server.name");
    assertNotNull(server.getConfig());

    // Explicit directories, no configuration.
    server = new Server("server", getAbsolutePath("/a"), getAbsolutePath("/b"),
        getAbsolutePath("/c"), getAbsolutePath("/d"));
    assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
    assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
    assertEquals(server.getLogDir(), getAbsolutePath("/c"));
    assertEquals(server.getTempDir(), getAbsolutePath("/d"));
    assertEquals(server.getName(), "server");
    assertEquals(server.getPrefix(), "server");
    assertEquals(server.getPrefixedName("name"), "server.name");
    assertNull(server.getConfig());

    // Home dir + configuration; conf/log/temp derived from the home dir.
    server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(),
        new Configuration(false));
    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
    assertEquals(server.getName(), "server");
    assertEquals(server.getPrefix(), "server");
    assertEquals(server.getPrefixedName("name"), "server.name");
    assertNotNull(server.getConfig());

    // Home dir only, no configuration.
    server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
    assertEquals(server.getName(), "server");
    assertEquals(server.getPrefix(), "server");
    assertEquals(server.getPrefixedName("name"), "server.name");
    assertNull(server.getConfig());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises service wiring on the Server: init/postInit ordering and reverse
 * destroy ordering, rollback on a failing service, service override via
 * server.services.ext, and runtime replacement/addition via setService().
 */
@Test
@TestDir
public void services() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    Configuration conf;
    Server server;

    // No services configured: nothing is initialized.
    ORDER.clear();
    conf = new Configuration(false);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(ORDER.size(), 0);

    // Two services: init in listed order, then postInit; destroy reversed.
    ORDER.clear();
    String services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
    assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s3.init");
    assertEquals(ORDER.get(2), "s1.postInit");
    assertEquals(ORDER.get(3), "s3.postInit");
    server.destroy();
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // A failing service aborts init and destroys the already-inited ones.
    ORDER.clear();
    services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(),
        MyService2.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    try {
        server.init();
        fail();
    } catch (ServerException ex) {
        assertEquals(MyService2.class, ex.getError().getClass());
    } catch (Exception ex) {
        fail();
    }
    assertEquals(ORDER.size(), 3);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s2.init");
    assertEquals(ORDER.get(2), "s1.destroy");

    // Another service pair (s1, s5) follows the same lifecycle ordering.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService5.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s5.init");
    assertEquals(ORDER.get(2), "s1.postInit");
    assertEquals(ORDER.get(3), "s5.postInit");
    server.destroy();
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s5.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // server.services.ext overrides a configured service implementation.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    String servicesExt = StringUtils.join(",",
        Arrays.asList(MyService1a.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    conf.set("server.services.ext", servicesExt);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1a.init");
    assertEquals(ORDER.get(1), "s3.init");
    assertEquals(ORDER.get(2), "s1a.postInit");
    assertEquals(ORDER.get(3), "s3.postInit");
    server.destroy();
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1a.destroy");

    // setService() replaces a running service: old destroyed, new inited.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    server.setService(MyService1a.class);
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s1.destroy");
    assertEquals(ORDER.get(5), "s1a.init");
    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
    server.destroy();
    assertEquals(ORDER.size(), 8);
    assertEquals(ORDER.get(6), "s3.destroy");
    assertEquals(ORDER.get(7), "s1a.destroy");

    // setService() can also add a brand-new service at runtime.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    server.setService(MyService5.class);
    assertEquals(ORDER.size(), 5);
    assertEquals(ORDER.get(4), "s5.init");
    assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
    server.destroy();
    assertEquals(ORDER.size(), 8);
    assertEquals(ORDER.get(5), "s5.destroy");
    assertEquals(ORDER.get(6), "s3.destroy");
    assertEquals(ORDER.get(7), "s1.destroy");

    // setService() with an invalid service fails with S09 and the server
    // is torn down.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    try {
        server.setService(MyService7.class);
        fail();
    } catch (ServerException ex) {
        assertEquals(ServerException.ERROR.S09, ex.getError());
    } catch (Exception ex) {
        fail();
    }
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // A service pair (s1, s6) initializes and exposes both interfaces.
    ORDER.clear();
    services = StringUtils.join(",",
        Arrays.asList(MyService1.class.getName(), MyService6.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
    assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
    server.destroy();
}

Class: org.apache.hadoop.lib.service.hadoop.TestFileSystemAccessService

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that an IOException thrown from a FileSystemExecutor surfaces as a
 * FileSystemAccessException with error H03, and that the FileSystem handed
 * to the executor is closed afterwards (purge timeout 0).
 */
@Test
@TestDir
@TestHdfs
public void fileSystemExecutorException() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    String services = StringUtils.join(",", Arrays.asList(
        InstrumentationService.class.getName(),
        SchedulerService.class.getName(),
        FileSystemAccessService.class.getName()));
    Configuration hadoopConf = new Configuration(false);
    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
    createHadoopConf(hadoopConf);
    Configuration conf = new Configuration(false);
    conf.set("server.services", services);
    conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    FileSystemAccess hadoop = server.get(FileSystemAccess.class);

    final FileSystem fsa[] = new FileSystem[1];
    try {
        hadoop.execute("u", hadoop.getFileSystemConfiguration(),
            new FileSystemAccess.FileSystemExecutor() {
                @Override
                public Void execute(FileSystem fs) throws IOException {
                    fsa[0] = fs; // captured to verify closure below
                    throw new IOException();
                }
            });
        Assert.fail();
    } catch (FileSystemAccessException ex) {
        Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
    } catch (Exception ex) {
        Assert.fail();
    }

    // The filesystem used inside execute() must now be closed.
    try {
        fsa[0].mkdirs(new Path("/tmp/foo"));
        Assert.fail();
    } catch (IOException ex) {
        // expected: filesystem already closed
    } catch (Exception ex) {
        Assert.fail();
    }
    server.destroy();
}

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
// Verifies that createFileSystem returns a usable FileSystem (mkdirs succeeds) and
// that releaseFileSystem — with cache purge timeout 0 — actually closes the handle,
// so a subsequent mkdirs on the released instance fails with IOException.
// The empty "catch (IOException ex)" is the expected-failure branch.
@Test @TestDir @TestHdfs public void createFileSystem() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration hadoopConf=new Configuration(false); hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); createHadoopConf(hadoopConf); Configuration conf=new Configuration(false); conf.set("server.services",services); conf.set("server.hadoop.filesystem.cache.purge.timeout","0"); Server server=new Server("server",dir,dir,dir,dir,conf); server.init(); FileSystemAccess hadoop=server.get(FileSystemAccess.class); FileSystem fs=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration()); Assert.assertNotNull(fs); fs.mkdirs(new Path("/tmp/foo")); hadoop.releaseFileSystem(fs); try { fs.mkdirs(new Path("/tmp/foo")); Assert.fail(); } catch ( IOException ex) { } catch ( Exception ex) { Assert.fail(); } server.destroy(); }

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies the FileSystem cache: two createFileSystem calls for the same user return
// the same cached instance (assertEquals(fs1, fs2)), a released-but-still-referenced
// instance keeps working while the other reference is active, and only after all
// references are released does the purger (frequency/timeout = 1s) close it — the
// final mkdirs must then fail with IOException.
// NOTE(review): correctness depends on the 4-second sleeps outrunning the purge
// cycle; this test is inherently timing-sensitive.
@Test @TestDir @TestHdfs public void fileSystemCache() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration hadoopConf=new Configuration(false); hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); createHadoopConf(hadoopConf); Configuration conf=new Configuration(false); conf.set("server.services",services); conf.set("server.hadoop.filesystem.cache.purge.frequency","1"); conf.set("server.hadoop.filesystem.cache.purge.timeout","1"); Server server=new Server("server",dir,dir,dir,dir,conf); try { server.init(); FileSystemAccess hadoop=server.get(FileSystemAccess.class); FileSystem fs1=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration()); Assert.assertNotNull(fs1); fs1.mkdirs(new Path("/tmp/foo1")); hadoop.releaseFileSystem(fs1); fs1.mkdirs(new Path("/tmp/foo2")); FileSystem fs2=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration()); Assert.assertEquals(fs1,fs2); Thread.sleep(4 * 1000); fs1.mkdirs(new Path("/tmp/foo2")); Thread.sleep(4 * 1000); fs2.mkdirs(new Path("/tmp/foo")); hadoop.releaseFileSystem(fs2); Thread.sleep(4 * 1000); try { fs2.mkdirs(new Path("/tmp/foo")); Assert.fail(); } catch ( IOException ex) { } catch ( Exception ex) { Assert.fail(); } } finally { server.destroy(); } }

Class: org.apache.hadoop.lib.service.instrumentation.TestInstrumentationService

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the InstrumentationService.Cron stopwatch life-cycle: fresh state is all
 * zeros, start()/stop() accumulate "own" time across laps (fluent, idempotent while
 * running), end() fixes the total, and start()/stop() after end() throw
 * IllegalStateException. All timing comparisons allow a 20 ms scheduling slack.
 *
 * Fix: the original called assertEquals with (actual, expected) arguments, which
 * inverts JUnit failure messages; arguments are now in the documented
 * (expected, actual) order.
 */
@Test
public void cron() {
  InstrumentationService.Cron cron = new InstrumentationService.Cron();
  // A fresh Cron has recorded no times yet.
  assertEquals(0, cron.start);
  assertEquals(0, cron.lapStart);
  assertEquals(0, cron.own);
  assertEquals(0, cron.total);
  long begin = Time.now();
  // start() returns the Cron itself and is a no-op if already running.
  assertEquals(cron, cron.start());
  assertEquals(cron, cron.start());
  assertEquals(begin, cron.start, 20);
  assertEquals(cron.start, cron.lapStart);
  sleep(100);
  assertEquals(cron, cron.stop());
  long end = Time.now();
  long delta = end - begin;
  // stop() folds the lap into "own" time and clears the lap marker.
  assertEquals(delta, cron.own, 20);
  assertEquals(0, cron.total);
  assertEquals(0, cron.lapStart);
  sleep(100);
  long reStart = Time.now();
  cron.start();
  // Restarting keeps the original start but opens a new lap.
  assertEquals(begin, cron.start, 20);
  assertEquals(reStart, cron.lapStart, 20);
  sleep(100);
  cron.stop();
  long reEnd = Time.now();
  delta += reEnd - reStart;
  assertEquals(delta, cron.own, 20);
  assertEquals(0, cron.total);
  assertEquals(0, cron.lapStart);
  cron.end();
  // total spans from the very first start to the last stop.
  assertEquals(reEnd - begin, cron.total, 20);
  try {
    cron.start();
    fail();
  } catch (IllegalStateException ex) {
    // expected: the Cron is closed once end() has been called
  } catch (Exception ex) {
    fail();
  }
  try {
    cron.stop();
    fail();
  } catch (IllegalStateException ex) {
    // expected
  } catch (Exception ex) {
    fail();
  }
}

Class: org.apache.hadoop.lib.service.security.TestGroupsService

InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier 
/**
 * Verifies that GroupsService can be looked up from the server and resolves at least
 * one group for the current OS user.
 *
 * Fix: the original used assertNotSame(g.size(), 0), which compares autoboxed
 * Integer references — it only behaved as intended because Integer.valueOf caches
 * small values (JLS 5.1.7). The intent is a value check, asserted directly on
 * list emptiness instead.
 */
@Test
@TestDir
public void service() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  Configuration conf = new Configuration(false);
  conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  Groups groups = server.get(Groups.class);
  assertNotNull(groups);
  List g = groups.getGroups(System.getProperty("user.name"));
  // Every user belongs to at least one group.
  assertFalse(g.isEmpty());
  server.destroy();
}

Class: org.apache.hadoop.lib.servlet.TestHostnameFilter

BooleanVerifier NullVerifier HybridVerifier 
// Verifies HostnameFilter behavior when the remote address is unknown (mocked
// getRemoteAddr() returns null): inside the chain the thread-local hostname contains
// the "???" placeholder, the chain is actually invoked (AtomicBoolean flag), and the
// thread-local is null both before doFilter and after it returns (cleanup).
@Test public void testMissingHostname() throws Exception { ServletRequest request=Mockito.mock(ServletRequest.class); Mockito.when(request.getRemoteAddr()).thenReturn(null); ServletResponse response=Mockito.mock(ServletResponse.class); final AtomicBoolean invoked=new AtomicBoolean(); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertTrue(HostnameFilter.get().contains("???")); invoked.set(true); } } ; Filter filter=new HostnameFilter(); filter.init(null); assertNull(HostnameFilter.get()); filter.doFilter(request,response,chain); assertTrue(invoked.get()); assertNull(HostnameFilter.get()); filter.destroy(); }

BooleanVerifier NullVerifier HybridVerifier 
// Happy-path counterpart of testMissingHostname: with a "localhost" remote address
// the thread-local hostname inside the chain resolves to either "localhost" or
// "127.0.0.1" (both accepted since reverse lookup results vary by environment), and
// the thread-local is cleared once doFilter returns.
@Test public void hostname() throws Exception { ServletRequest request=Mockito.mock(ServletRequest.class); Mockito.when(request.getRemoteAddr()).thenReturn("localhost"); ServletResponse response=Mockito.mock(ServletResponse.class); final AtomicBoolean invoked=new AtomicBoolean(); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertTrue(HostnameFilter.get().contains("localhost") || HostnameFilter.get().contains("127.0.0.1")); invoked.set(true); } } ; Filter filter=new HostnameFilter(); filter.init(null); assertNull(HostnameFilter.get()); filter.doFilter(request,response,chain); assertTrue(invoked.get()); assertNull(HostnameFilter.get()); filter.destroy(); }

Class: org.apache.hadoop.lib.servlet.TestMDCFilter

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies MDCFilter populates the SLF4J MDC for the duration of the chain and clears
// it afterwards, across three scenarios run against the same filter instance:
//   1. no principal, no hostname thread-local  -> hostname/user null, method/path set;
//   2. a Principal named "name"                -> user becomes "name";
//   3. HostnameFilter.HOSTNAME_TL set to "HOST" -> hostname becomes "HOST".
// After each doFilter the MDC keys must be null again. The thread-local is removed at
// the end so later tests on this thread are unaffected.
// NOTE(review): assertEquals calls use (actual, expected) argument order.
@Test public void mdc() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getUserPrincipal()).thenReturn(null); Mockito.when(request.getMethod()).thenReturn("METHOD"); Mockito.when(request.getPathInfo()).thenReturn("/pathinfo"); ServletResponse response=Mockito.mock(ServletResponse.class); final AtomicBoolean invoked=new AtomicBoolean(); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),null); assertEquals(MDC.get("user"),null); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; MDC.clear(); Filter filter=new MDCFilter(); filter.init(null); filter.doFilter(request,response,chain); assertTrue(invoked.get()); assertNull(MDC.get("hostname")); assertNull(MDC.get("user")); assertNull(MDC.get("method")); assertNull(MDC.get("path")); Mockito.when(request.getUserPrincipal()).thenReturn(new Principal(){ @Override public String getName(){ return "name"; } } ); invoked.set(false); chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),null); assertEquals(MDC.get("user"),"name"); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; filter.doFilter(request,response,chain); assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.set("HOST"); invoked.set(false); chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),"HOST"); assertEquals(MDC.get("user"),"name"); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; 
filter.doFilter(request,response,chain); assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.remove(); filter.destroy(); }

Class: org.apache.hadoop.lib.util.TestConfigurationUtils

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * ConfigurationUtils.injectDefaults(src, target) must copy a source entry into the
 * target only when the target does not already define that key, and must never
 * modify the source configuration itself.
 */
@Test
public void injectDefaults() throws Exception {
  Configuration source = new Configuration(false);
  Configuration target = new Configuration(false);
  source.set("testParameter1", "valueFromSource");
  source.set("testParameter2", "valueFromSource");
  target.set("testParameter2", "originalValueFromTarget");
  target.set("testParameter3", "originalValueFromTarget");

  ConfigurationUtils.injectDefaults(source, target);

  // Only the key missing from the target is filled in; existing values win.
  assertEquals("valueFromSource", target.get("testParameter1"));
  assertEquals("originalValueFromTarget", target.get("testParameter2"));
  assertEquals("originalValueFromTarget", target.get("testParameter3"));
  // The source is left exactly as it was before the injection.
  assertEquals("valueFromSource", source.get("testParameter1"));
  assertEquals("valueFromSource", source.get("testParameter2"));
  assertNull(source.get("testParameter3"));
}

Class: org.apache.hadoop.lib.wsrs.TestJSONMapProvider

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * JSONMapProvider contract: Map is writeable, unrelated types are not, getSize
 * reports the JAX-RS "unknown length" value -1, and writeTo serializes a one-entry
 * JSON object as {"a":"A"}.
 *
 * Fixes: the serialized bytes are decoded explicitly as UTF-8 — the no-charset
 * String(byte[]) constructor uses the platform default charset and can corrupt the
 * comparison on non-UTF-8 JVMs; assertEquals arguments are now in the documented
 * (expected, actual) order.
 */
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
  JSONMapProvider p = new JSONMapProvider();
  assertTrue(p.isWriteable(Map.class, null, null, null));
  assertFalse(p.isWriteable(this.getClass(), null, null, null));
  // -1 means the entity size is not known in advance.
  assertEquals(-1, p.getSize(null, null, null, null, null));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JSONObject json = new JSONObject();
  json.put("a", "A");
  p.writeTo(json, JSONObject.class, null, null, null, null, baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}",
      new String(baos.toByteArray(), java.nio.charset.StandardCharsets.UTF_8).trim());
}

Class: org.apache.hadoop.lib.wsrs.TestJSONProvider

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * JSONProvider contract: JSONObject is writeable, unrelated types are not, getSize
 * reports the JAX-RS "unknown length" value -1, and writeTo serializes a one-entry
 * JSON object as {"a":"A"}.
 *
 * Fixes: the serialized bytes are decoded explicitly as UTF-8 — the no-charset
 * String(byte[]) constructor uses the platform default charset and can corrupt the
 * comparison on non-UTF-8 JVMs; assertEquals arguments are now in the documented
 * (expected, actual) order.
 */
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
  JSONProvider p = new JSONProvider();
  assertTrue(p.isWriteable(JSONObject.class, null, null, null));
  assertFalse(p.isWriteable(this.getClass(), null, null, null));
  // -1 means the entity size is not known in advance.
  assertEquals(-1, p.getSize(null, null, null, null, null));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JSONObject json = new JSONObject();
  json.put("a", "A");
  p.writeTo(json, JSONObject.class, null, null, null, null, baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}",
      new String(baos.toByteArray(), java.nio.charset.StandardCharsets.UTF_8).trim());
}

Class: org.apache.hadoop.mapred.TestClientRedirect

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// End-to-end redirect test for the MR client: the client first reaches the job via
// RM -> AM (amContact flag), then the AM is stopped and, while "restarting"
// (amRestarting=true), getCounters falls back to an empty Counters and every other
// job API must still be callable without error; a fresh AM is then started and
// contacted again; finally, with the AM stopped for good, the client falls back to
// the history server (hsContact flag). RM/AM/History services are in-process stubs.
// NOTE(review): the two 5-second sleeps make this test slow and timing-sensitive by
// design (socket drain before/after AM stop).
@Test public void testRedirect() throws Exception { Configuration conf=new YarnConfiguration(); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME); conf.set(YarnConfiguration.RM_ADDRESS,RMADDRESS); conf.set(JHAdminConfig.MR_HISTORY_ADDRESS,HSHOSTADDRESS); RMService rmService=new RMService("test"); rmService.init(conf); rmService.start(); AMService amService=new AMService(); amService.init(conf); amService.start(conf); HistoryService historyService=new HistoryService(); historyService.init(conf); historyService.start(conf); LOG.info("services started"); Cluster cluster=new Cluster(conf); org.apache.hadoop.mapreduce.JobID jobID=new org.apache.hadoop.mapred.JobID("201103121733",1); org.apache.hadoop.mapreduce.Counters counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(amContact); LOG.info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately.."); Thread.sleep(5000); amService.stop(); LOG.info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly.."); Thread.sleep(5000); amRestarting=true; counters=cluster.getJob(jobID).getCounters(); Assert.assertEquals(0,counters.countCounters()); Job job=cluster.getJob(jobID); org.apache.hadoop.mapreduce.TaskID taskId=new org.apache.hadoop.mapreduce.TaskID(jobID,TaskType.MAP,0); TaskAttemptID tId=new TaskAttemptID(taskId,0); job.killJob(); job.killTask(tId); job.failTask(tId); job.getTaskCompletionEvents(0,100); job.getStatus(); job.getTaskDiagnostics(tId); job.getTaskReports(TaskType.MAP); job.getTrackingURL(); amRestarting=false; amService=new AMService(); amService.init(conf); amService.start(conf); amContact=false; counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(amContact); amService.stop(); counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(hsContact); rmService.stop(); historyService.stop(); }

Class: org.apache.hadoop.mapred.TestClientServiceDelegate

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// When the RM no longer knows the application (getApplicationReport returns null),
// ClientServiceDelegate must fetch the job status from the history server: file path,
// tracking URL, and completed (1.0) map/reduce progress come from the mocked
// history-server report.
@Test public void testJobReportFromHistoryServer() throws Exception { MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer()); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null); ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(historyServerProxy,rm); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("TestJobFilePath",jobStatus.getJobFile()); Assert.assertEquals("http://TestTrackingUrl",jobStatus.getTrackingUrl()); Assert.assertEquals(1.0f,jobStatus.getMapProgress(),0.0f); Assert.assertEquals(1.0f,jobStatus.getReduceProgress(),0.0f); }

UtilityVerifier BooleanVerifier HybridVerifier 
// An IOException raised by the history server (here: unknown job id) must propagate
// to the caller of getJobStatus after the delegate exhausts its retries, preserving
// the original message. (The "doesnot" spelling is part of the exchanged message and
// deliberately matches on both sides.)
@Test public void testRemoteExceptionFromHistoryServer() throws Exception { MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getJobReport(getJobReportRequest())).thenThrow(new IOException("Job ID doesnot Exist")); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null); ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(historyServerProxy,rm); try { clientServiceDelegate.getJobStatus(oldJobId); Assert.fail("Invoke should throw exception after retries."); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("Job ID doesnot Exist")); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Only meaningful when the AM is reachable from the client (early return otherwise).
// The mocked AM proxy fails four times with RuntimeExceptions before succeeding; the
// delegate must keep retrying within MR_CLIENT_MAX_RETRIES, eventually return a
// non-null status, and be verified to have hit the proxy exactly 5 times.
// The instantiateAMProxy override still calls super so address resolution is
// exercised, but always hands back the mock.
@Test public void testRetriesOnAMConnectionFailures() throws Exception { if (!isAMReachableFromClient) { return; } ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(getRunningApplicationReport("am1",78)); final MRClientProtocol amProxy=mock(MRClientProtocol.class); when(amProxy.getJobReport(any(GetJobReportRequest.class))).thenThrow(new RuntimeException("11")).thenThrow(new RuntimeException("22")).thenThrow(new RuntimeException("33")).thenThrow(new RuntimeException("44")).thenReturn(getJobReportResponse()); Configuration conf=new YarnConfiguration(); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME); conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,!isAMReachableFromClient); ClientServiceDelegate clientServiceDelegate=new ClientServiceDelegate(conf,rm,oldJobId,null){ @Override MRClientProtocol instantiateAMProxy( final InetSocketAddress serviceAddr) throws IOException { super.instantiateAMProxy(serviceAddr); return amProxy; } } ; JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES),clientServiceDelegate.getMaxClientRetry()); verify(amProxy,times(5)).getJobReport(any(GetJobReportRequest.class)); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Only meaningful when AM access is disabled (early return otherwise). While the RM
// reports the app as RUNNING, getJobStatus must return the placeholder "N/A" job name
// and never instantiate an AM proxy (verified times(0) after each call); once the RM
// reports the app FINISHED, the real report comes from the history server, still
// without ever contacting the AM.
@Test public void testAMAccessDisabled() throws IOException { if (isAMReachableFromClient) { return; } MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer()); ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class); try { when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getFinishedApplicationReport()); } catch ( YarnException e) { throw new IOException(e); } ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate)); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); JobStatus jobStatus1=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus1); Assert.assertEquals("TestJobFilePath",jobStatus1.getJobFile()); Assert.assertEquals("http://TestTrackingUrl",jobStatus1.getTrackingUrl()); Assert.assertEquals(1.0f,jobStatus1.getMapProgress(),0.0f); Assert.assertEquals(1.0f,jobStatus1.getReduceProgress(),0.0f); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// When the RM no longer knows the application, job counters must be served by the
// history server; the mocked response carries a single counter
// dummyCounters/dummyCounter with value 1001, verified after TypeConverter round-trip.
@Test public void testCountersFromHistoryServer() throws Exception { MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getCounters(getCountersRequest())).thenReturn(getCountersResponseFromHistoryServer()); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null); ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(historyServerProxy,rm); Counters counters=TypeConverter.toYarn(clientServiceDelegate.getJobCounters(oldJobId)); Assert.assertNotNull(counters); Assert.assertEquals(1001,counters.getCounterGroup("dummyCounters").getCounter("dummyCounter").getValue()); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Only meaningful when the AM is reachable (early return otherwise). Simulates an AM
// restart: the RM first reports host "am1", then two null-host reports (AM down),
// then "am2". The first-generation AM proxy answers once ("jobName-firstGen") and
// then throws; the delegate must transparently reconnect and get "jobName-secondGen"
// from the second-generation proxy, reusing the cached proxy on the third call —
// hence instantiateAMProxy is verified to run exactly twice.
@Test public void testReconnectOnAMRestart() throws IOException { if (!isAMReachableFromClient) { return; } MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class); try { when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport("am2",90)); } catch ( YarnException e) { throw new IOException(e); } GetJobReportResponse jobReportResponse1=mock(GetJobReportResponse.class); when(jobReportResponse1.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-firstGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,"")); MRClientProtocol firstGenAMProxy=mock(MRClientProtocol.class); when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse1).thenThrow(new RuntimeException("AM is down!")); GetJobReportResponse jobReportResponse2=mock(GetJobReportResponse.class); when(jobReportResponse2.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-secondGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,"")); MRClientProtocol secondGenAMProxy=mock(MRClientProtocol.class); when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse2); ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate)); doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class)); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-firstGen",jobStatus.getJobName()); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-secondGen",jobStatus.getJobName()); 
jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-secondGen",jobStatus.getJobName()); verify(clientServiceDelegate,times(2)).instantiateAMProxy(any(InetSocketAddress.class)); }

Class: org.apache.hadoop.mapred.TestClusterStatus

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Graylisting is a deprecated, no-op feature: the cluster status must always report
 * zero graylisted trackers and an empty name collection.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 1000)
public void testGraylistedTrackers() {
  final int graylisted = clusterStatus.getGraylistedTrackers();
  Assert.assertEquals(0, graylisted);
  Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
}

Class: org.apache.hadoop.mapred.TestCombineSequenceFileInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Seeded-random test of CombineSequenceFileInputFormat: 10 files of sequence records
// must always be combined into a single CombineFileSplit regardless of the requested
// split count, and reading that split back must yield every key exactly once (BitSet
// tracks duplicates/misses). The seed is logged so failures are reproducible.
@Test(timeout=10000) public void testFormat() throws Exception { JobConf job=new JobConf(conf); Reporter reporter=Reporter.NULL; Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); InputFormat format=new CombineSequenceFileInputFormat(); IntWritable key=new IntWritable(); BytesWritable value=new BytesWritable(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.info("splitting: got = " + splits.length); assertEquals("We got more than one splits!",1,splits.length); InputSplit split=splits[0]; assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); RecordReader reader=format.getRecordReader(split,job,reporter); try { while (reader.next(key,value)) { assertFalse("Key in multiple partitions.",bits.get(key.get())); bits.set(key.get()); } } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

Class: org.apache.hadoop.mapred.TestCombineTextInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Seeded-random test of CombineTextInputFormat, analogous to the sequence-file
// variant: 10 text files must combine into exactly one CombineFileSplit, and reading
// it back must produce each integer value exactly once (BitSet catches duplicates and
// omissions). Conflicting values are logged with the reader position to aid debugging.
@Test(timeout=10000) public void testFormat() throws Exception { JobConf job=new JobConf(defaultConf); Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); CombineTextInputFormat format=new CombineTextInputFormat(); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / 20) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.info("splitting: got = " + splits.length); assertEquals("We got more than one splits!",1,splits.length); InputSplit split=splits[0]; assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); LOG.debug("split= " + split); RecordReader reader=format.getRecordReader(split,job,voidReporter); try { int count=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.info("splits=" + split + " count="+ count); } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Two gzip files are combined into a single split of 8 lines total. Since the order
 * in which the two files appear inside the combined split is unspecified, the test
 * inspects the first line to decide which file came first and then validates both
 * line lists in that order; any other first token is a failure.
 */
@Test(timeout=10000) public void testGzip() throws IOException { JobConf job=new JobConf(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,job); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); FileInputFormat.setInputPaths(job,workDir); CombineTextInputFormat format=new CombineTextInputFormat(); InputSplit[] splits=format.getSplits(job,100); assertEquals("compressed splits == 1",1,splits.length); List results=readSplit(format,splits[0],job); assertEquals("splits[0] length",8,results.size()); final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"}; final String[] secondList={"this is a test","of gzip"}; String first=results.get(0).toString(); if (first.equals(firstList[0])) { testResults(results,firstList,secondList); } else if (first.equals(secondList[0])) { testResults(results,secondList,firstList); } else { fail("unexpected first token!"); } }

Class: org.apache.hadoop.mapred.TestConcatenatedCompressedInput

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Manually parses the gzip container of a concatenated test file (RFC 1952 layout)
 * and inflates the first member with a raw java.util.zip.Inflater: validates the
 * magic bytes 0x1f 0x8b, compression method 8 (deflate), then conditionally consumes
 * the optional FEXTRA (flag 0x04), FNAME (0x08, NUL-terminated), FCOMMENT (0x10,
 * NUL-terminated) and FHCRC (0x02) fields before handing the remaining bytes to
 * Inflater(nowrap=true). Reserved flag bits (0xe0) must be zero.
 * NOTE(review): the parsed crc16 value is read but never checked; in.skip()'s return
 * value is ignored (assumes the stream skips fully); in.close() is not in a finally
 * block, so a failed assertion leaks the FileInputStream. Test-only, left as is.
 */
@Test public void testPrototypeInflaterGzip() throws IOException { CompressionCodec gzip=new GzipCodec(); localFs.delete(workDir,true); System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " + "non-native/Java Inflater and manual gzip header/trailer parsing"+ COLOR_NORMAL); final String fn="concat" + gzip.getDefaultExtension(); Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn); Path fnHDFS=new Path(workDir,fn); localFs.copyFromLocalFile(fnLocal,fnHDFS); final FileInputStream in=new FileInputStream(fnLocal.toString()); assertEquals("concat bytes available",148,in.available()); byte[] compressedBuf=new byte[256]; int numBytesRead=in.read(compressedBuf,0,10); assertEquals("header bytes read",10,numBytesRead); assertEquals("1st byte",0x1f,compressedBuf[0] & 0xff); assertEquals("2nd byte",0x8b,compressedBuf[1] & 0xff); assertEquals("3rd byte (compression method)",8,compressedBuf[2] & 0xff); byte flags=(byte)(compressedBuf[3] & 0xff); if ((flags & 0x04) != 0) { numBytesRead=in.read(compressedBuf,0,2); assertEquals("XLEN bytes read",2,numBytesRead); int xlen=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff; in.skip(xlen); } if ((flags & 0x08) != 0) { while ((numBytesRead=in.read()) != 0) { assertFalse("unexpected end-of-file while reading filename",numBytesRead == -1); } } if ((flags & 0x10) != 0) { while ((numBytesRead=in.read()) != 0) { assertFalse("unexpected end-of-file while reading comment",numBytesRead == -1); } } if ((flags & 0xe0) != 0) { assertTrue("reserved bits are set??",(flags & 0xe0) == 0); } if ((flags & 0x02) != 0) { numBytesRead=in.read(compressedBuf,0,2); assertEquals("CRC16 bytes read",2,numBytesRead); int crc16=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff; } numBytesRead=in.read(compressedBuf); byte[] uncompressedBuf=new byte[256]; Inflater inflater=new Inflater(true); inflater.setInput(compressedBuf,0,numBytesRead); try { int 
numBytesUncompressed=inflater.inflate(uncompressedBuf); String outString=new String(uncompressedBuf,0,numBytesUncompressed,"UTF-8"); System.out.println("uncompressed data of first gzip member = [" + outString + "]"); } catch ( java.util.zip.DataFormatException ex) { throw new IOException(ex.getMessage()); } in.close(); }

Class: org.apache.hadoop.mapred.TestCounters

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * makeCompactString() renders each counter as "group.counter:value", comma-joined.
 * With a single counter the output is exact; with two counters the group iteration
 * order is unspecified, so both orderings are accepted.
 *
 * Fix: the first assertion repeated the literal instead of using the GC1 constant
 * declared for exactly that value; it now uses the constant, keeping the expected
 * strings defined in one place.
 */
@Test
public void testMakeCompactString() {
  final String GC1 = "group1.counter1:1";
  final String GC2 = "group2.counter2:3";
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  assertEquals(GC1, counters.makeCompactString());
  counters.incrCounter("group2", "counter2", 3);
  String cs = counters.makeCompactString();
  // Either group may be emitted first.
  assertTrue("Bad compact string", cs.equals(GC1 + ',' + GC2) || cs.equals(GC2 + ',' + GC1));
}

Class: org.apache.hadoop.mapred.TestFileInputFormat

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that FileSplit exposes per-host SplitLocationInfo from the mock "test:///"
// filesystem: two locations, where "localhost" is both on-disk and in-memory (cached)
// and "otherhost" is on-disk only. The ternaries pair each host name with its info
// entry since the array order is not guaranteed.
@Test public void testSplitLocationInfo() throws Exception { Configuration conf=getConfiguration(); conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2"); JobConf job=new JobConf(conf); TextInputFormat fileInputFormat=new TextInputFormat(); fileInputFormat.configure(job); FileSplit[] splits=(FileSplit[])fileInputFormat.getSplits(job,1); String[] locations=splits[0].getLocations(); Assert.assertEquals(2,locations.length); SplitLocationInfo[] locationInfo=splits[0].getLocationInfo(); Assert.assertEquals(2,locationInfo.length); SplitLocationInfo localhostInfo=locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1]; SplitLocationInfo otherhostInfo=locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1]; Assert.assertTrue(localhostInfo.isOnDisk()); Assert.assertTrue(localhostInfo.isInMemory()); Assert.assertTrue(otherhostInfo.isOnDisk()); Assert.assertFalse(otherhostInfo.isInMemory()); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listStatus over a configuration that includes a missing input directory must throw
// InvalidInputException (an IOException subclass) whose message names the qualified
// missing path ("input2" under the test root), even with multi-threaded listing
// enabled. (Method name spelling "NonExistant" is kept: it is the public test name.)
@Test public void testListStatusErrorOnNonExistantDir() throws IOException { Configuration conf=new Configuration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestErrorOnNonExistantDir(conf,localFs); JobConf jobConf=new JobConf(conf); TextInputFormat fif=new TextInputFormat(); fif.configure(jobConf); try { fif.listStatus(jobConf); Assert.fail("Expecting an IOException for a missing Input path"); } catch ( IOException e) { Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2"); expectedExceptionPath=localFs.makeQualified(expectedExceptionPath); Assert.assertTrue(e instanceof InvalidInputException); Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage()); } }

Class: org.apache.hadoop.mapred.TestJobConf

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exhaustive getter/setter battery for JobConf: for each property it first checks
 * the documented default, then sets a value and reads it back. Covered areas:
 * jar unpack pattern, keep-failed-task files/pattern, working directory, JVM task
 * reuse, key-field comparator, new-reducer flag, speculative execution (the combined
 * flag stays true while either map or reduce speculation is on), session id,
 * per-tracker and percent failure limits, job priority, submit host name/address,
 * profiling (range set for maps must not leak into reduces), debug scripts, job
 * local dir, queue name, map/reduce memory, deprecated max-physical-memory (-1),
 * deprecatedString(), and that the task java-opts keys are unset by default.
 * NOTE(review): some assertEquals calls (e.g. profile ranges) use (actual, expected)
 * argument order.
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobConf(){ JobConf conf=new JobConf(); Pattern pattern=conf.getJarUnpackPattern(); assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),pattern.toString()); assertFalse(conf.getKeepFailedTaskFiles()); conf.setKeepFailedTaskFiles(true); assertTrue(conf.getKeepFailedTaskFiles()); assertNull(conf.getKeepTaskFilesPattern()); conf.setKeepTaskFilesPattern("123454"); assertEquals("123454",conf.getKeepTaskFilesPattern()); assertNotNull(conf.getWorkingDirectory()); conf.setWorkingDirectory(new Path("test")); assertTrue(conf.getWorkingDirectory().toString().endsWith("test")); assertEquals(1,conf.getNumTasksToExecutePerJvm()); assertNull(conf.getKeyFieldComparatorOption()); conf.setKeyFieldComparatorOptions("keySpec"); assertEquals("keySpec",conf.getKeyFieldComparatorOption()); assertFalse(conf.getUseNewReducer()); conf.setUseNewReducer(true); assertTrue(conf.getUseNewReducer()); assertTrue(conf.getMapSpeculativeExecution()); assertTrue(conf.getReduceSpeculativeExecution()); assertTrue(conf.getSpeculativeExecution()); conf.setReduceSpeculativeExecution(false); assertTrue(conf.getSpeculativeExecution()); conf.setMapSpeculativeExecution(false); assertFalse(conf.getSpeculativeExecution()); assertFalse(conf.getMapSpeculativeExecution()); assertFalse(conf.getReduceSpeculativeExecution()); conf.setSessionId("ses"); assertEquals("ses",conf.getSessionId()); assertEquals(3,conf.getMaxTaskFailuresPerTracker()); conf.setMaxTaskFailuresPerTracker(2); assertEquals(2,conf.getMaxTaskFailuresPerTracker()); assertEquals(0,conf.getMaxMapTaskFailuresPercent()); conf.setMaxMapTaskFailuresPercent(50); assertEquals(50,conf.getMaxMapTaskFailuresPercent()); assertEquals(0,conf.getMaxReduceTaskFailuresPercent()); conf.setMaxReduceTaskFailuresPercent(70); assertEquals(70,conf.getMaxReduceTaskFailuresPercent()); assertEquals(JobPriority.NORMAL.name(),conf.getJobPriority().name()); 
conf.setJobPriority(JobPriority.HIGH); assertEquals(JobPriority.HIGH.name(),conf.getJobPriority().name()); assertNull(conf.getJobSubmitHostName()); conf.setJobSubmitHostName("hostname"); assertEquals("hostname",conf.getJobSubmitHostName()); assertNull(conf.getJobSubmitHostAddress()); conf.setJobSubmitHostAddress("ww"); assertEquals("ww",conf.getJobSubmitHostAddress()); assertFalse(conf.getProfileEnabled()); conf.setProfileEnabled(true); assertTrue(conf.getProfileEnabled()); assertEquals(conf.getProfileTaskRange(true).toString(),"0-2"); assertEquals(conf.getProfileTaskRange(false).toString(),"0-2"); conf.setProfileTaskRange(true,"0-3"); assertEquals(conf.getProfileTaskRange(false).toString(),"0-2"); assertEquals(conf.getProfileTaskRange(true).toString(),"0-3"); assertNull(conf.getMapDebugScript()); conf.setMapDebugScript("mDbgScript"); assertEquals("mDbgScript",conf.getMapDebugScript()); assertNull(conf.getReduceDebugScript()); conf.setReduceDebugScript("rDbgScript"); assertEquals("rDbgScript",conf.getReduceDebugScript()); assertNull(conf.getJobLocalDir()); assertEquals("default",conf.getQueueName()); conf.setQueueName("qname"); assertEquals("qname",conf.getQueueName()); conf.setMemoryForMapTask(100 * 1000); assertEquals(100 * 1000,conf.getMemoryForMapTask()); conf.setMemoryForReduceTask(1000 * 1000); assertEquals(1000 * 1000,conf.getMemoryForReduceTask()); assertEquals(-1,conf.getMaxPhysicalMemoryForTask()); assertEquals("The variable key is no longer used.",JobConf.deprecatedString("key")); assertEquals("mapreduce.map.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS)); assertEquals("mapreduce.reduce.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS)); }

Class: org.apache.hadoop.mapred.TestLineRecordReader

BooleanVerifier NullVerifier HybridVerifier 
@Test public void testStripBOM() throws IOException { String UTF8_BOM="\uFEFF"; URL testFileUrl=getClass().getClassLoader().getResource("testBOM.txt"); assertNotNull("Cannot find testBOM.txt",testFileUrl); File testFile=new File(testFileUrl.getFile()); Path testFilePath=new Path(testFile.getAbsolutePath()); long testFileSize=testFile.length(); Configuration conf=new Configuration(); conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,Integer.MAX_VALUE); FileSplit split=new FileSplit(testFilePath,0,testFileSize,(String[])null); LineRecordReader reader=new LineRecordReader(conf,split); LongWritable key=new LongWritable(); Text value=new Text(); int numRecords=0; boolean firstLine=true; boolean skipBOM=true; while (reader.next(key,value)) { if (firstLine) { firstLine=false; if (value.toString().startsWith(UTF8_BOM)) { skipBOM=false; } } ++numRecords; } reader.close(); assertTrue("BOM is not skipped",skipBOM); }

Class: org.apache.hadoop.mapred.TestMRWithDistributedCache

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the deprecated DistributedCache static helpers. For each family
 * (local archives, local files, timestamps, cache archives/files) an "add"
 * call appends to the comma-separated configuration value while a "set"
 * call replaces it.
 *
 * NOTE(review): timeout=1000 is tight for a test that touches the local
 * filesystem (createNewFile/delete) -- confirm it is not flaky on slow hosts.
 */
@Test(timeout=1000)
public void testDeprecatedFunctions() throws Exception {
  // Local archives: add appends, set replaces.
  DistributedCache.addLocalArchives(conf, "Test Local Archives 1");
  Assert.assertEquals("Test Local Archives 1",
      conf.get(DistributedCache.CACHE_LOCALARCHIVES));
  Assert.assertEquals(1, DistributedCache.getLocalCacheArchives(conf).length);
  Assert.assertEquals("Test Local Archives 1",
      DistributedCache.getLocalCacheArchives(conf)[0].getName());
  DistributedCache.addLocalArchives(conf, "Test Local Archives 2");
  Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",
      conf.get(DistributedCache.CACHE_LOCALARCHIVES));
  Assert.assertEquals(2, DistributedCache.getLocalCacheArchives(conf).length);
  Assert.assertEquals("Test Local Archives 2",
      DistributedCache.getLocalCacheArchives(conf)[1].getName());
  DistributedCache.setLocalArchives(conf, "Test Local Archives 3");
  Assert.assertEquals("Test Local Archives 3",
      conf.get(DistributedCache.CACHE_LOCALARCHIVES));
  Assert.assertEquals(1, DistributedCache.getLocalCacheArchives(conf).length);
  Assert.assertEquals("Test Local Archives 3",
      DistributedCache.getLocalCacheArchives(conf)[0].getName());
  // Local files: same append/replace semantics.
  DistributedCache.addLocalFiles(conf, "Test Local Files 1");
  Assert.assertEquals("Test Local Files 1",
      conf.get(DistributedCache.CACHE_LOCALFILES));
  Assert.assertEquals(1, DistributedCache.getLocalCacheFiles(conf).length);
  Assert.assertEquals("Test Local Files 1",
      DistributedCache.getLocalCacheFiles(conf)[0].getName());
  DistributedCache.addLocalFiles(conf, "Test Local Files 2");
  Assert.assertEquals("Test Local Files 1,Test Local Files 2",
      conf.get(DistributedCache.CACHE_LOCALFILES));
  Assert.assertEquals(2, DistributedCache.getLocalCacheFiles(conf).length);
  Assert.assertEquals("Test Local Files 2",
      DistributedCache.getLocalCacheFiles(conf)[1].getName());
  DistributedCache.setLocalFiles(conf, "Test Local Files 3");
  Assert.assertEquals("Test Local Files 3",
      conf.get(DistributedCache.CACHE_LOCALFILES));
  Assert.assertEquals(1, DistributedCache.getLocalCacheFiles(conf).length);
  Assert.assertEquals("Test Local Files 3",
      DistributedCache.getLocalCacheFiles(conf)[0].getName());
  // Timestamps are stored as longs in the configuration.
  DistributedCache.setArchiveTimestamps(conf, "1234567890");
  Assert.assertEquals(1234567890,
      conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS, 0));
  Assert.assertEquals(1, DistributedCache.getArchiveTimestamps(conf).length);
  Assert.assertEquals(1234567890, DistributedCache.getArchiveTimestamps(conf)[0]);
  DistributedCache.setFileTimestamps(conf, "1234567890");
  Assert.assertEquals(1234567890,
      conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS, 0));
  Assert.assertEquals(1, DistributedCache.getFileTimestamps(conf).length);
  Assert.assertEquals(1234567890, DistributedCache.getFileTimestamps(conf)[0]);
  // createAllSymlink leaves CACHE_SYMLINK unset, yet getSymlink reports true
  // (deprecated behavior asserted below).
  DistributedCache.createAllSymlink(conf, new File("Test Job Cache Dir"),
      new File("Test Work Dir"));
  Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK));
  Assert.assertTrue(DistributedCache.getSymlink(conf));
  // File status/timestamp lookups resolve through the local file system.
  Assert.assertTrue(symlinkFile.createNewFile());
  FileStatus fileStatus = DistributedCache.getFileStatus(conf, symlinkFile.toURI());
  Assert.assertNotNull(fileStatus);
  Assert.assertEquals(fileStatus.getModificationTime(),
      DistributedCache.getTimestamp(conf, symlinkFile.toURI()));
  Assert.assertTrue(symlinkFile.delete());
  // Cache archives/files are stored as URIs.
  DistributedCache.addCacheArchive(symlinkFile.toURI(), conf);
  Assert.assertEquals(symlinkFile.toURI().toString(),
      conf.get(DistributedCache.CACHE_ARCHIVES));
  Assert.assertEquals(1, DistributedCache.getCacheArchives(conf).length);
  Assert.assertEquals(symlinkFile.toURI(), DistributedCache.getCacheArchives(conf)[0]);
  DistributedCache.addCacheFile(symlinkFile.toURI(), conf);
  Assert.assertEquals(symlinkFile.toURI().toString(),
      conf.get(DistributedCache.CACHE_FILES));
  Assert.assertEquals(1, DistributedCache.getCacheFiles(conf).length);
  Assert.assertEquals(symlinkFile.toURI(), DistributedCache.getCacheFiles(conf)[0]);
}

Class: org.apache.hadoop.mapred.TestMaster

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testGetMasterAddress(){ YarnConfiguration conf=new YarnConfiguration(); String masterHostname=Master.getMasterAddress(conf).getHostName(); InetSocketAddress rmAddr=NetUtils.createSocketAddr(YarnConfiguration.DEFAULT_RM_ADDRESS); assertEquals(masterHostname,rmAddr.getHostName()); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.CLASSIC_FRAMEWORK_NAME); conf.set(MRConfig.MASTER_ADDRESS,"local:invalid"); try { Master.getMasterAddress(conf); fail("Should not reach here as there is a bad master address"); } catch ( Exception e) { } conf.set(MRConfig.MASTER_ADDRESS,"bar.com:8042"); masterHostname=Master.getMasterAddress(conf).getHostName(); assertEquals(masterHostname,"bar.com"); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME); conf.set(YarnConfiguration.RM_ADDRESS,"foo1.com:8192"); masterHostname=Master.getMasterAddress(conf).getHostName(); assertEquals(masterHostname,"foo1.com"); }

Class: org.apache.hadoop.mapred.TestMiniMRChildTask

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/** * To test OS dependent setting of default execution path for a MapRed task. * Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set - * for WINDOWS: %HADOOP_COMMON_HOME%\bin is expected to be included in PATH - for * Linux: $HADOOP_COMMON_HOME/lib/native is expected to be included in * LD_LIBRARY_PATH */ @Test public void testMapRedExecutionEnv(){ try { Map environment=new HashMap(); String setupHadoopHomeCommand=Shell.WINDOWS ? "HADOOP_COMMON_HOME=C:\\fake\\PATH\\to\\hadoop\\common\\home" : "HADOOP_COMMON_HOME=/fake/path/to/hadoop/common/home"; MRApps.setEnvFromInputString(environment,setupHadoopHomeCommand,conf); MRApps.setEnvFromInputString(environment,conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV,MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV),conf); String executionPaths=environment.get(Shell.WINDOWS ? "PATH" : "LD_LIBRARY_PATH"); String toFind=Shell.WINDOWS ? "C:\\fake\\PATH\\to\\hadoop\\common\\home\\bin" : "/fake/path/to/hadoop/common/home/lib/native"; assertTrue("execution path does not include the hadoop lib location " + toFind,executionPaths.contains(toFind)); } catch ( Exception e) { e.printStackTrace(); fail("Exception in testing execution environment for MapReduce task"); tearDown(); } try { JobConf conf=new JobConf(mr.getConfig()); Path inDir=new Path("input"); Path outDir=new Path("output"); String input="The input"; configure(conf,inDir,outDir,input,ExecutionEnvCheckMapClass.class,IdentityReducer.class); launchTest(conf,inDir,outDir,input); } catch ( Exception e) { e.printStackTrace(); fail("Exception in testing propagation of env setting to child task"); tearDown(); } }

Class: org.apache.hadoop.mapred.TestNetworkedJob

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end exercise of JobClient/NetworkedJob against a mini cluster with
 * the capacity scheduler: submits an identity map-only job and then walks
 * through job metadata, cluster status, Writable round-tripping, queue
 * information, ACLs, and delegation tokens.
 * @throws Exception
 */
@SuppressWarnings("deprecation")
@Test(timeout=500000)
public void testNetworkedJob() throws Exception {
  MiniMRClientCluster mr = null;
  FileSystem fileSys = null;
  try {
    mr = createMiniClusterWithCapacityScheduler();
    JobConf job = new JobConf(mr.getConfig());
    fileSys = FileSystem.get(job);
    fileSys.delete(testDir, true);
    // Seed a small input file for the identity job.
    FSDataOutputStream out = fileSys.create(inFile, true);
    out.writeBytes("This is a test file");
    out.close();
    FileInputFormat.setInputPaths(job, inFile);
    FileOutputFormat.setOutputPath(job, outDir);
    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setMapperClass(IdentityMapper.class);
    job.setReducerClass(IdentityReducer.class);
    job.setNumReduceTasks(0);
    JobClient client = new JobClient(mr.getConfig());
    RunningJob rj = client.submitJob(job);
    JobID jobId = rj.getID();
    NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
    runningJob.setJobPriority(JobPriority.HIGH.name());
    // Identity / metadata of the freshly submitted job.
    assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
    assertEquals(runningJob.getID(), jobId);
    assertEquals(runningJob.getJobID(), jobId.toString());
    assertEquals(runningJob.getJobName(), "N/A");
    assertTrue(runningJob.getJobFile().endsWith(
        ".staging/" + runningJob.getJobID() + "/job.xml"));
    assertTrue(runningJob.getTrackingURL().length() > 0);
    // No progress has been made yet.
    assertTrue(runningJob.mapProgress() == 0.0f);
    assertTrue(runningJob.reduceProgress() == 0.0f);
    assertTrue(runningJob.cleanupProgress() == 0.0f);
    assertTrue(runningJob.setupProgress() == 0.0f);
    TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
    assertEquals(tce.length, 0);
    assertEquals(runningJob.getHistoryUrl(), "");
    assertFalse(runningJob.isRetired());
    assertEquals(runningJob.getFailureInfo(), "");
    assertEquals(runningJob.getJobStatus().getJobName(), "N/A");
    assertEquals(client.getMapTaskReports(jobId).length, 0);
    // Setup/cleanup task reports are rejected under YARN.
    // NOTE(review): if no exception is thrown these blocks pass silently --
    // a fail() after each call would make the expectation strict.
    try {
      client.getSetupTaskReports(jobId);
    } catch (YarnRuntimeException e) {
      assertEquals(e.getMessage(), "Unrecognized task type: JOB_SETUP");
    }
    try {
      client.getCleanupTaskReports(jobId);
    } catch (YarnRuntimeException e) {
      assertEquals(e.getMessage(), "Unrecognized task type: JOB_CLEANUP");
    }
    assertEquals(client.getReduceTaskReports(jobId).length, 0);
    // Cluster status as reported by the mini cluster.
    ClusterStatus status = client.getClusterStatus(true);
    assertEquals(status.getActiveTrackerNames().size(), 2);
    assertEquals(status.getBlacklistedTrackers(), 0);
    assertEquals(status.getBlacklistedTrackerNames().size(), 0);
    assertEquals(status.getBlackListedTrackersInfo().size(), 0);
    assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
    assertEquals(status.getMapTasks(), 1);
    assertEquals(status.getMaxMapTasks(), 20);
    assertEquals(status.getMaxReduceTasks(), 4);
    assertEquals(status.getNumExcludedNodes(), 0);
    assertEquals(status.getReduceTasks(), 1);
    assertEquals(status.getTaskTrackers(), 2);
    assertEquals(status.getTTExpiryInterval(), 0);
    assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
    assertEquals(status.getGraylistedTrackers(), 0);
    // ClusterStatus must survive a Writable serialization round trip.
    ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
    status.write(new DataOutputStream(dataOut));
    ClusterStatus status2 = new ClusterStatus();
    status2.readFields(new DataInputStream(
        new ByteArrayInputStream(dataOut.toByteArray())));
    assertEquals(status.getActiveTrackerNames(), status2.getActiveTrackerNames());
    assertEquals(status.getBlackListedTrackersInfo(),
        status2.getBlackListedTrackersInfo());
    assertEquals(status.getMapTasks(), status2.getMapTasks());
    // NOTE(review): dead code -- the try block below is empty, so the catch
    // can never fire; the statement it was meant to guard appears missing.
    try {
    } catch (RuntimeException e) {
      assertTrue(e.getMessage().endsWith("not found on CLASSPATH"));
    }
    // Client-level defaults and queue information.
    JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
    assertEquals(JobClient.getTaskOutputFilter(job), TaskStatusFilter.ALL);
    assertEquals(client.getDefaultMaps(), 20);
    assertEquals(client.getDefaultReduces(), 4);
    assertEquals(client.getSystemDir().getName(), "jobSubmitDir");
    JobQueueInfo[] rootQueueInfo = client.getRootQueues();
    assertEquals(rootQueueInfo.length, 1);
    assertEquals(rootQueueInfo[0].getQueueName(), "default");
    JobQueueInfo[] qinfo = client.getQueues();
    assertEquals(qinfo.length, 1);
    assertEquals(qinfo[0].getQueueName(), "default");
    assertEquals(client.getChildQueues("default").length, 0);
    assertEquals(client.getJobsFromQueue("default").length, 1);
    assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
    JobQueueInfo qi = client.getQueueInfo("default");
    assertEquals(qi.getQueueName(), "default");
    assertEquals(qi.getQueueState(), "running");
    QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
    assertEquals(aai.length, 2);
    assertEquals(aai[0].getQueueName(), "root");
    assertEquals(aai[1].getQueueName(), "default");
    // Delegation token and job-status consistency between submit-time and
    // lookup-time views of the same job.
    Token token = client.getDelegationToken(
        new Text(UserGroupInformation.getCurrentUser().getShortUserName()));
    assertEquals(token.getKind().toString(), "RM_DELEGATION_TOKEN");
    assertEquals("Expected matching JobIDs", jobId,
        client.getJob(jobId).getJobStatus().getJobID());
    assertEquals("Expected matching startTimes",
        rj.getJobStatus().getStartTime(),
        client.getJob(jobId).getJobStatus().getStartTime());
  } finally {
    if (fileSys != null) {
      fileSys.delete(testDir, true);
    }
    if (mr != null) {
      mr.stop();
    }
  }
}

Class: org.apache.hadoop.mapred.TestOldCombinerGrouping

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a local-mode job whose combiner uses a dedicated combiner-key
 * grouping comparator, then verifies the combiner actually collapsed
 * records and that the job produced the expected grouped output
 * ("A2" and "B5").
 */
@Test
public void testCombiner() throws Exception {
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in = new File(TEST_ROOT_DIR, "input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out = new File(TEST_ROOT_DIR, "output");
  // FIX: try-with-resources -- the original leaked the writer if a
  // println threw before close().
  try (PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")))) {
    pw.println("A|a,1");
    pw.println("A|b,2");
    pw.println("B|a,3");
    pw.println("B|b,4");
    pw.println("B|c,5");
  }
  JobConf job = new JobConf();
  job.set("mapreduce.framework.name", "local");
  TextInputFormat.setInputPaths(job, new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormat(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormat(TextOutputFormat.class);
  job.setOutputValueGroupingComparator(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  job.setCombinerKeyGroupingComparator(GroupComparator.class);
  // Force the combiner to run even with very few spills.
  job.setInt("min.num.spills.for.combine", 0);
  JobClient client = new JobClient(job);
  RunningJob runningJob = client.submitJob(job);
  runningJob.waitForCompletion();
  if (runningJob.isSuccessful()) {
    Counters counters = runningJob.getCounters();
    long combinerInputRecords = counters
        .getGroup("org.apache.hadoop.mapreduce.TaskCounter")
        .getCounter("COMBINE_INPUT_RECORDS");
    long combinerOutputRecords = counters
        .getGroup("org.apache.hadoop.mapreduce.TaskCounter")
        .getCounter("COMBINE_OUTPUT_RECORDS");
    // The combiner must have run and must have collapsed records.
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    Set output = new HashSet();
    // FIX: try-with-resources -- the original left the reader open when an
    // assertion failed before br.close().
    try (BufferedReader br =
        new BufferedReader(new FileReader(new File(out, "part-00000")))) {
      // Exactly two output lines; collect "<group><value>" pairs.
      String line = br.readLine();
      Assert.assertNotNull(line);
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNotNull(line);
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNull(line);
    }
    Set expected = new HashSet();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected, output);
  } else {
    Assert.fail("Job failed");
  }
}

Class: org.apache.hadoop.mapred.TestOldMethodsJobID

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * test Reporter.NULL */ @Test(timeout=5000) public void testReporter(){ Reporter nullReporter=Reporter.NULL; assertNull(nullReporter.getCounter(null)); assertNull(nullReporter.getCounter("group","name")); try { assertNull(nullReporter.getInputSplit()); } catch ( UnsupportedOperationException e) { assertEquals("NULL reporter has no input",e.getMessage()); } assertEquals(0,nullReporter.getProgress(),0.01); }

Class: org.apache.hadoop.mapred.TestQueue

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * test QueueManager
 * configuration from file: builds two queues ("first" running, "second"
 * stopped), checks ACL evaluation with a mocked user, refresh stability,
 * JobQueueInfo views, and the JSON emitted by dumpConfiguration.
 * @throws IOException
 */
@Test(timeout=5000)
public void testQueue() throws IOException {
  File f = null;
  try {
    // Queue-configuration file with two queues.
    f = writeFile();
    QueueManager manager = new QueueManager(f.getCanonicalPath(), true);
    manager.setSchedulerInfo("first", "queueInfo");
    manager.setSchedulerInfo("second", "queueInfoqueueInfo");
    Queue root = manager.getRoot();
    assertTrue(root.getChildren().size() == 2);
    Iterator iterator = root.getChildren().iterator();
    Queue firstSubQueue = iterator.next();
    assertTrue(firstSubQueue.getName().equals("first"));
    assertEquals(
        firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),
        "Users [user1, user2] and members of the groups [group1, group2] are allowed");
    Queue secondSubQueue = iterator.next();
    assertTrue(secondSubQueue.getName().equals("second"));
    assertEquals(secondSubQueue.getProperties().getProperty("key"), "value");
    assertEquals(secondSubQueue.getProperties().getProperty("key1"), "value1");
    assertEquals(firstSubQueue.getState().getStateName(), "running");
    assertEquals(secondSubQueue.getState().getStateName(), "stopped");
    Set template = new HashSet();
    template.add("first");
    template.add("second");
    assertEquals(manager.getLeafQueueNames(), template);
    // ACL checks with a mocked user: user1/group1 may submit to "first" but
    // not "second", and may not administer "first"; user3 may administer.
    UserGroupInformation mockUGI = mock(UserGroupInformation.class);
    when(mockUGI.getShortUserName()).thenReturn("user1");
    String[] groups = {"group1"};
    when(mockUGI.getGroupNames()).thenReturn(groups);
    assertTrue(manager.hasAccess("first", QueueACL.SUBMIT_JOB, mockUGI));
    assertFalse(manager.hasAccess("second", QueueACL.SUBMIT_JOB, mockUGI));
    assertFalse(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
    when(mockUGI.getShortUserName()).thenReturn("user3");
    assertTrue(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
    QueueAclsInfo[] qai = manager.getQueueAcls(mockUGI);
    assertEquals(qai.length, 1);
    // Refreshing keeps queue identity, state, and scheduling info intact.
    manager.refreshQueues(getConfiguration(), null);
    iterator = root.getChildren().iterator();
    Queue firstSubQueue1 = iterator.next();
    Queue secondSubQueue1 = iterator.next();
    assertTrue(firstSubQueue.equals(firstSubQueue1));
    assertEquals(firstSubQueue1.getState().getStateName(), "running");
    assertEquals(secondSubQueue1.getState().getStateName(), "stopped");
    assertEquals(firstSubQueue1.getSchedulingInfo(), "queueInfo");
    assertEquals(secondSubQueue1.getSchedulingInfo(), "queueInfoqueueInfo");
    // JobQueueInfo views mirror the queue tree.
    assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(), "first");
    assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(), "running");
    assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(), "queueInfo");
    assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(), 0);
    assertEquals(manager.getSchedulerInfo("first"), "queueInfo");
    Set queueJobQueueInfos = new HashSet();
    for (JobQueueInfo jobInfo : manager.getJobQueueInfos()) {
      queueJobQueueInfos.add(jobInfo.getQueueName());
    }
    Set rootJobQueueInfos = new HashSet();
    for (Queue queue : root.getChildren()) {
      rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
    }
    assertEquals(queueJobQueueInfos, rootJobQueueInfos);
    assertEquals(manager.getJobQueueInfoMapping().get("first").getQueueName(), "first");
    // dumpConfiguration(writer, file, conf) must emit the file's queue
    // definitions as JSON.
    Writer writer = new StringWriter();
    Configuration conf = getConfiguration();
    conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
    QueueManager.dumpConfiguration(writer, f.getAbsolutePath(), conf);
    String result = writer.toString();
    assertTrue(result.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
    // dumpConfiguration(writer, conf) falls back to the configuration's
    // own queue definitions.
    writer = new StringWriter();
    QueueManager.dumpConfiguration(writer, conf);
    result = writer.toString();
    assertEquals("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}", result);
    // A freshly constructed QueueAclsInfo carries no queue name.
    QueueAclsInfo qi = new QueueAclsInfo();
    assertNull(qi.getQueueName());
  } finally {
    if (f != null) {
      f.delete();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test for Qmanager with empty configuration * @throws IOException */ @Test(timeout=5000) public void test2Queue() throws IOException { Configuration conf=getConfiguration(); QueueManager manager=new QueueManager(conf); manager.setSchedulerInfo("first","queueInfo"); manager.setSchedulerInfo("second","queueInfoqueueInfo"); Queue root=manager.getRoot(); assertTrue(root.getChildren().size() == 2); Iterator iterator=root.getChildren().iterator(); Queue firstSubQueue=iterator.next(); assertTrue(firstSubQueue.getName().equals("first")); assertEquals(firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed"); Queue secondSubQueue=iterator.next(); assertTrue(secondSubQueue.getName().equals("second")); assertEquals(firstSubQueue.getState().getStateName(),"running"); assertEquals(secondSubQueue.getState().getStateName(),"stopped"); assertTrue(manager.isRunning("first")); assertFalse(manager.isRunning("second")); assertEquals(firstSubQueue.getSchedulingInfo(),"queueInfo"); assertEquals(secondSubQueue.getSchedulingInfo(),"queueInfoqueueInfo"); Set template=new HashSet(); template.add("first"); template.add("second"); assertEquals(manager.getLeafQueueNames(),template); }

Class: org.apache.hadoop.mapred.TestResourceMgrDelegate

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void tesAllJobs() throws Exception { final ApplicationClientProtocol applicationsManager=Mockito.mock(ApplicationClientProtocol.class); GetApplicationsResponse allApplicationsResponse=Records.newRecord(GetApplicationsResponse.class); List applications=new ArrayList(); applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.FAILED)); applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.SUCCEEDED)); applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.KILLED)); applications.add(getApplicationReport(YarnApplicationState.FAILED,FinalApplicationStatus.FAILED)); allApplicationsResponse.setApplicationList(applications); Mockito.when(applicationsManager.getApplications(Mockito.any(GetApplicationsRequest.class))).thenReturn(allApplicationsResponse); ResourceMgrDelegate resourceMgrDelegate=new ResourceMgrDelegate(new YarnConfiguration()){ @Override protected void serviceStart() throws Exception { Assert.assertTrue(this.client instanceof YarnClientImpl); ((YarnClientImpl)this.client).setRMClient(applicationsManager); } } ; JobStatus[] allJobs=resourceMgrDelegate.getAllJobs(); Assert.assertEquals(State.FAILED,allJobs[0].getState()); Assert.assertEquals(State.SUCCEEDED,allJobs[1].getState()); Assert.assertEquals(State.KILLED,allJobs[2].getState()); Assert.assertEquals(State.FAILED,allJobs[3].getState()); }

Class: org.apache.hadoop.mapred.TestShuffleHandler

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Validate the limit on number of shuffle connections.
 * @throws Exception exception
 */
@Test(timeout=10000)
public void testMaxConnections() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0); // ephemeral port
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
  // Stub shuffle that skips request verification and streams a large dummy
  // payload so each connection stays occupied long enough to hit the limit.
  ShuffleHandler shuffleHandler = new ShuffleHandler() {
    @Override
    protected Shuffle getShuffle(Configuration conf) {
      return new Shuffle(conf) {
        @Override
        protected MapOutputInfo getMapOutputInfo(String base, String mapId,
            int reduce, String user) throws IOException {
          return null;
        }
        @Override
        protected void populateHeaders(List mapIds, String jobId,
            String user, int reduce, HttpRequest request,
            HttpResponse response, boolean keepAliveParam, Map infoMap)
            throws IOException {
        }
        @Override
        protected void verifyRequest(String appid, ChannelHandlerContext ctx,
            HttpRequest request, HttpResponse response, URL requestUri)
            throws IOException {
        }
        @Override
        protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
            Channel ch, String user, String mapId, int reduce,
            MapOutputInfo info) throws IOException {
          // One header up front, then ~100k copies to keep the stream busy.
          ShuffleHeader header = new ShuffleHeader("dummy_header", 5678, 5678, 1);
          DataOutputBuffer dob = new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
          dob = new DataOutputBuffer();
          for (int i = 0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  // Open three connection attempts against the bound shuffle port.
  int connAttempts = 3;
  HttpURLConnection conns[] = new HttpURLConnection[connAttempts];
  for (int i = 0; i < connAttempts; i++) {
    String URLstring = "http://127.0.0.1:"
        + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
        + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_" + i + "_0";
    URL url = new URL(URLstring);
    conns[i] = (HttpURLConnection) url.openConnection();
    conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
        ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
    conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
        ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  }
  for (int i = 0; i < connAttempts; i++) {
    conns[i].connect();
  }
  // The first two streams open fine; the final attempt must be refused by
  // the connection limit (surfacing as a SocketException on read).
  conns[0].getInputStream();
  int rc = conns[0].getResponseCode();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
  conns[1].getInputStream();
  rc = conns[1].getResponseCode();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
  try {
    conns[2].getInputStream();
    rc = conns[2].getResponseCode();
    Assert.fail("Expected a SocketException");
  } catch (SocketException se) {
    LOG.info("Expected - connection should not be open");
  } catch (Exception e) {
    Assert.fail("Expected a SocketException");
  }
  shuffleHandler.stop();
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies shuffle-handler state-store version compatibility across
 * restarts: state written by the current version (1.0) and by a newer
 * minor version (1.1) is recoverable, while a newer major version (2.1)
 * must make startup fail with an incompatible-schema error.
 */
@Test
public void testRecoveryFromOtherVersions() throws IOException {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(12345, 1);
  final File tmpDir = new File(
      System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")),
      TestShuffleHandler.class.getName());
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
  ShuffleHandler shuffle = new ShuffleHandler();
  shuffle.setRecoveryPath(new Path(tmpDir.toString()));
  tmpDir.mkdirs();
  try {
    shuffle.init(conf);
    shuffle.start();
    // Register an application with a shuffle token.
    DataOutputBuffer outputBuffer = new DataOutputBuffer();
    outputBuffer.reset();
    Token jt = new Token("identifier".getBytes(), "password".getBytes(),
        new Text(user), new Text("shuffleService"));
    jt.write(outputBuffer);
    shuffle.initializeApplication(new ApplicationInitializationContext(user,
        appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
    int rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // Restart with the same recovery path: state must be recovered.
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // A newer MINOR version (1.1) is compatible: after restart the stored
    // version reads back as the current version (1.0).
    Version version = Version.newInstance(1, 0);
    Assert.assertEquals(version, shuffle.getCurrentVersion());
    Version version11 = Version.newInstance(1, 1);
    shuffle.storeVersion(version11);
    Assert.assertEquals(version11, shuffle.loadVersion());
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    Assert.assertEquals(version, shuffle.loadVersion());
    rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // A newer MAJOR version (2.1) is incompatible: start() must fail.
    Version version21 = Version.newInstance(2, 1);
    shuffle.storeVersion(version21);
    Assert.assertEquals(version21, shuffle.loadVersion());
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    try {
      shuffle.start();
      Assert.fail("Incompatible version, should expect fail here.");
    } catch (ServiceStateException e) {
      Assert.assertTrue("Exception message mismatch",
          e.getMessage().contains("Incompatible version for state DB schema:"));
    }
  } finally {
    if (shuffle != null) {
      shuffle.close();
    }
    FileUtil.fullyDelete(tmpDir);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify client prematurely closing a connection.
 * The handler's sendError() must NOT be invoked when the client walks away
 * mid-stream; any invocation is recorded in {@code failures} and asserted
 * empty at the end.
 * @throws Exception exception.
 */
@Test(timeout=10000)
public void testClientClosesConnection() throws Exception {
  // Collects an Error the first time sendError() fires.
  final ArrayList failures = new ArrayList(1);
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  ShuffleHandler shuffleHandler = new ShuffleHandler() {
    @Override
    protected Shuffle getShuffle(Configuration conf) {
      return new Shuffle(conf) {
        @Override
        protected MapOutputInfo getMapOutputInfo(String base, String mapId,
            int reduce, String user) throws IOException {
          return null;
        }
        @Override
        protected void populateHeaders(List mapIds, String jobId,
            String user, int reduce, HttpRequest request,
            HttpResponse response, boolean keepAliveParam, Map infoMap)
            throws IOException {
          // Sets response headers with a 100 ms keep-alive value.
          super.setResponseHeaders(response, keepAliveParam, 100);
        }
        @Override
        protected void verifyRequest(String appid, ChannelHandlerContext ctx,
            HttpRequest request, HttpResponse response, URL requestUri)
            throws IOException {
        }
        @Override
        protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
            Channel ch, String user, String mapId, int reduce,
            MapOutputInfo info) throws IOException {
          // One header up front, then a large payload the client will abandon.
          ShuffleHeader header =
              new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
          DataOutputBuffer dob = new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
          dob = new DataOutputBuffer();
          for (int i = 0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
        }
        @Override
        protected void sendError(ChannelHandlerContext ctx,
            HttpResponseStatus status) {
          // Record only the first invocation, then close the channel.
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
        @Override
        protected void sendError(ChannelHandlerContext ctx, String message,
            HttpResponseStatus status) {
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  // Issue a shuffle request, read only the header, then close early.
  URL url = new URL("http://127.0.0.1:"
      + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
      + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
      ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
      ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  conn.connect();
  DataInputStream input = new DataInputStream(conn.getInputStream());
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  Assert.assertEquals("close", conn.getHeaderField(HttpHeaders.CONNECTION));
  ShuffleHeader header = new ShuffleHeader();
  header.readFields(input);
  input.close();
  shuffleHandler.stop();
  Assert.assertTrue("sendError called when client closed connection",
      failures.size() == 0);
}

APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Validate the ownership of the map-output files being pulled in. The
 * local-file-system owner of the file should match the user component in the
 * shuffle request; here they deliberately differ, so the response body must
 * carry the ownership-mismatch message (secure/kerberos code path).
 * @throws Exception exception
 */
@Test(timeout=100000)
public void testMapFileAccess() throws IOException {
  // Ownership inspection requires native IO support.
  assumeTrue(NativeIO.isAvailable());
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
  // Kerberos auth enables the secure path that checks file owners.
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  File absLogDir = new File("target",
      TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
  ApplicationId appId = ApplicationId.newInstance(12345, 1);
  LOG.info(appId.toString());
  String appAttemptId = "attempt_12345_1_m_1_0";
  // Request as "randomUser" while the files are owned by the test runner.
  String user = "randomUser";
  String reducerId = "0";
  List fileMap = new ArrayList();
  createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId,
      conf, fileMap);
  ShuffleHandler shuffleHandler = new ShuffleHandler() {
    @Override
    protected Shuffle getShuffle(Configuration conf) {
      // Skip request verification; ownership checking is what's under test.
      return new Shuffle(conf) {
        @Override
        protected void verifyRequest(String appid, ChannelHandlerContext ctx,
            HttpRequest request, HttpResponse response, URL requestUri)
            throws IOException {
        }
      };
    }
  };
  shuffleHandler.init(conf);
  try {
    shuffleHandler.start();
    DataOutputBuffer outputBuffer = new DataOutputBuffer();
    outputBuffer.reset();
    Token jt = new Token("identifier".getBytes(), "password".getBytes(),
        new Text(user), new Text("shuffleService"));
    jt.write(outputBuffer);
    shuffleHandler.initializeApplication(new ApplicationInitializationContext(
        user, appId,
        ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
    URL url = new URL("http://127.0.0.1:"
        + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
        + "/mapOutput?job=job_12345_0001&reduce=" + reducerId
        + "&map=attempt_12345_1_m_1_0");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
        ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
        ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
    conn.connect();
    byte[] byteArr = new byte[10000];
    try {
      DataInputStream is = new DataInputStream(conn.getInputStream());
      is.readFully(byteArr);
    } catch (EOFException e) {
      // expected: the error body is shorter than the read buffer
    }
    // Determine the actual local owner of the first map-output file.
    FileInputStream is = new FileInputStream(fileMap.get(0));
    String owner = NativeIO.POSIX.getFstat(is.getFD()).getOwner();
    is.close();
    String message = "Owner '" + owner + "' for path "
        + fileMap.get(0).getAbsolutePath()
        + " did not match expected owner '" + user + "'";
    Assert.assertTrue((new String(byteArr)).contains(message));
  } finally {
    shuffleHandler.stop();
  }
}

Class: org.apache.hadoop.mapred.TestSkipBadRecords

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies SkipBadRecords defaults and that every setter is reflected by
 * its corresponding getter.
 */
@Test(timeout=5000)
public void testSkipBadRecords() {
  // Defaults: skipping starts after 2 attempts, processed-record counters
  // auto-increment, skip limits are 0, and no skip output path is set.
  Configuration conf = new Configuration();
  assertEquals(2, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertTrue(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertTrue(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(0, SkipBadRecords.getMapperMaxSkipRecords(conf));
  // FIX: dropped the spurious third argument; the getter returns a long, so
  // the extra 0 selected the deprecated double-with-delta assertEquals
  // overload (inconsistent with the mapper assertion above).
  assertEquals(0, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertNull(SkipBadRecords.getSkipOutputPath(conf));

  // Exercise the setters.
  SkipBadRecords.setAttemptsToStartSkipping(conf, 5);
  SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
  SkipBadRecords.setAutoIncrReducerProcCount(conf, false);
  SkipBadRecords.setMapperMaxSkipRecords(conf, 6L);
  SkipBadRecords.setReducerMaxSkipGroups(conf, 7L);
  // setSkipOutputPath requires a JobConf rather than a plain Configuration.
  JobConf jc = new JobConf();
  SkipBadRecords.setSkipOutputPath(jc, new Path("test"));

  // Every setter above must be observable through its getter.
  assertEquals(5, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertFalse(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertFalse(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(6L, SkipBadRecords.getMapperMaxSkipRecords(conf));
  assertEquals(7L, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertEquals("test", SkipBadRecords.getSkipOutputPath(jc).toString());
}

Class: org.apache.hadoop.mapred.TestTaskAttemptListenerImpl

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a TaskCheckpointID stored in the TaskAttemptListener can be
 * read back unchanged (same counters, same partial output, same instance).
 *
 * @throws IOException never; declared for the listener API
 * @throws InterruptedException never; declared for the listener API
 */
@Test
public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = new SystemClock();

  // A job whose single task always says it can commit.
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);

  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);

  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  // FIX: the original stubbed getEventHandler() twice; once is enough.
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);

  // Listener whose heartbeat handler is replaced by the mock.
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {
        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  listener.init(conf);
  listener.start();

  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);

  List partialOut = new ArrayList();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));

  // Fake checkpoint counters: bytes checkpointed and time spent.
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);

  // Store a checkpoint ID and read it back; it must round-trip unchanged.
  TaskCheckpointID incid =
      new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(
      org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);
  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  // FIX: was a bare Java `assert`, which is silently skipped unless the JVM
  // runs with -ea; assertSame always checks the identity round-trip.
  assertSame(incid, outcid);
  listener.stop();
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the getTask() handshake: unknown JVMs are told to die, a pending
 * (not launched) task is not handed out, a launched task is handed out
 * exactly once, and JVMId string parsing rejects malformed ids.
 *
 * @throws IOException never; declared for the listener API
 */
@Test(timeout=5000)
public void testGetTask() throws IOException {
  // Wire up a listener backed entirely by mocks.
  AppContext appCtx = mock(AppContext.class);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  MockTaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();

  JVMId id = new JVMId("foo", 1, true, 1);
  WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
  JvmContext context = new JvmContext();
  context.jvmId = id;

  // An unknown JVM must be told to die.
  JvmTask result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);

  TaskAttemptId attemptID = mock(TaskAttemptId.class);
  Task task = mock(Task.class);
  // Pending but not yet launched: nothing is handed out.
  listener.registerPendingTask(task, wid);
  result = listener.getTask(context);
  assertNull(result);
  listener.unregister(attemptID, wid);

  // Registered and launched: the first poll gets the task and the heartbeat
  // handler is registered for the attempt.
  listener.registerPendingTask(task, wid);
  listener.registerLaunchedTask(attemptID, wid);
  verify(hbHandler).register(attemptID);
  result = listener.getTask(context);
  assertNotNull(result);
  assertFalse(result.shouldDie);
  // A second poll from the same JVM is told to die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.unregister(attemptID, wid);

  // After unregistering, the JVM is again told to die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.stop();

  // JVMId string parsing: a valid id parses, a malformed one is rejected.
  JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
  assertNotNull(jvmid);
  try {
    JVMId.forName("jvm_001_002_m_004_006");
    fail();
  } catch (IllegalArgumentException e) {
    // FIX: expected value goes first in assertEquals; the original had the
    // arguments reversed, producing a misleading failure message.
    assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed",
        e.getMessage());
  }
}

Class: org.apache.hadoop.mapred.TestTaskLog

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests TaskLog when the YARN container log dir property is unset:
 * getMRv2LogDir() must be null and the task log file falls back to the
 * default stdout location.
 *
 * @throws IOException on failure
 */
@Test(timeout=50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
  // Make sure the container log dir property is not set for this test.
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  // FIX: use assertNull instead of the reversed assertEquals(actual, null).
  assertNull(TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests task log path resolution when the YARN container log dir is set:
 * log files, the index file, real log locations and log-dir ownership.
 *
 * @throws IOException on failure
 */
@Test(timeout=50000)
public void testTaskLog() throws IOException {
  System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
  // FIX: expected value goes first in assertEquals (was reversed).
  assertEquals("testString", TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith(
      "testString" + File.separatorChar + "stdout"));

  // Recreate the index file from scratch before syncing logs.
  File indexFile = TaskLog.getIndexFile(taid, true);
  if (!indexFile.getParentFile().exists()) {
    indexFile.getParentFile().mkdirs();
  }
  indexFile.delete();
  // FIX: verify the index file was actually (re)created instead of silently
  // ignoring the createNewFile() result.
  assertTrue(indexFile.createNewFile());
  TaskLog.syncLogs("location", taid, true);
  assertTrue(indexFile.getAbsolutePath().endsWith(
      "userlogs" + File.separatorChar + "job_job_0001" + File.separatorChar
          + "JobId.cleanup" + File.separatorChar + "log.index"));

  // The real debugout location is only checked when it resolves.
  f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
  if (f != null) {
    assertTrue(f.getAbsolutePath().endsWith(
        "location" + File.separatorChar + "debugout"));
    FileUtils.copyFile(indexFile, f);
  }
  assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
  assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}

Class: org.apache.hadoop.mapred.TestTaskLogAppender

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests TaskLogAppender: configuration is picked up from system properties
 * by activateOptions(), appended events reach the configured writer, and
 * the cleanup flag survives re-activation.
 */
@SuppressWarnings("deprecation")
@Test(timeout=5000)
public void testTaskLogAppender() {
  TaskLogAppender appender = new TaskLogAppender();

  // activateOptions() reads task id and log size from system properties.
  System.setProperty(TaskLogAppender.TASKID_PROPERTY, "attempt_01_02_m03_04_001");
  System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
  appender.activateOptions();
  // FIX: expected value goes first in assertEquals (all three were
  // reversed), and booleans are checked with assertFalse/assertTrue.
  assertEquals("attempt_01_02_m03_04_001", appender.getTaskId());
  assertEquals(1000, appender.getTotalLogFileSize());
  assertFalse(appender.getIsCleanup());

  // Append one event and verify that something was written.
  Writer writer = new StringWriter();
  appender.setWriter(writer);
  Layout layout = new PatternLayout("%-5p [%t]: %m%n");
  appender.setLayout(layout);
  Category logger = Logger.getLogger(getClass().getName());
  LoggingEvent event = new LoggingEvent(
      "fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
  appender.append(event);
  appender.flush();
  appender.close();
  assertTrue(writer.toString().length() > 0);

  // The cleanup flag must survive activateOptions().
  appender = new TaskLogAppender();
  appender.setIsCleanup(true);
  appender.activateOptions();
  assertTrue(appender.getIsCleanup());
}

Class: org.apache.hadoop.mapred.TestTextInputFormat

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips files of random increasing lengths through TextInputFormat
 * with random split counts (three rounds per length), asserting that every
 * written line is read back from exactly one split (no duplicates, no
 * losses) and that an empty file still yields one zero-length split. The
 * random seed is logged so failures can be reproduced.
 */
@Test(timeout=500000) public void testFormat() throws Exception { JobConf job=new JobConf(defaultConf); Path file=new Path(workDir,"test.txt"); Reporter reporter=Reporter.NULL; int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) { LOG.debug("creating; entries = " + length); Writer writer=new OutputStreamWriter(localFs.create(file)); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } TextInputFormat format=new TextInputFormat(); format.configure(job); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 20) + 1; LOG.debug("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.debug("splitting: got = " + splits.length); if (length == 0) { assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length); assertEquals("Empty file length == 0",0,splits[0].getLength()); } BitSet bits=new BitSet(length); for (int j=0; j < splits.length; j++) { LOG.debug("split[" + j + "]= "+ splits[j]); RecordReader reader=format.getRecordReader(splits[j],job,reporter); try { int count=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count); } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Same split/read round-trip as testFormat, but the input file is written
 * through the splittable BZip2 codec: every line must come back from
 * exactly one split with no duplicates or losses. The random seed is
 * logged so failures can be reproduced.
 */
@Test(timeout=900000) public void testSplitableCodecs() throws IOException { JobConf conf=new JobConf(defaultConf); int seed=new Random().nextInt(); CompressionCodec codec=null; try { codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf); } catch ( ClassNotFoundException cnfe) { throw new IOException("Illegal codec!"); } Path file=new Path(workDir,"test" + codec.getDefaultExtension()); Reporter reporter=Reporter.NULL; LOG.info("seed = " + seed); Random random=new Random(seed); FileSystem localFs=FileSystem.getLocal(conf); localFs.delete(workDir,true); FileInputFormat.setInputPaths(conf,workDir); final int MAX_LENGTH=500000; for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) { LOG.info("creating; entries = " + length); Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file))); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } TextInputFormat format=new TextInputFormat(); format.configure(conf); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(conf,numSplits); LOG.info("splitting: got = " + splits.length); BitSet bits=new BitSet(length); for (int j=0; j < splits.length; j++) { LOG.debug("split[" + j + "]= "+ splits[j]); RecordReader reader=format.getRecordReader(splits[j],conf,reporter); try { int counter=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); counter++; } if (counter > 0) { LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter); } else { 
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter); } } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

Class: org.apache.hadoop.mapred.TestTextOutputFormat

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Writes key/value pairs — including null and NullWritable on either side —
 * through TextOutputFormat and verifies the produced file: a null or
 * NullWritable key/value suppresses that side together with the tab
 * separator, and a pair that is null/NullWritable on both sides produces
 * no line at all.
 */
@Test public void testFormat() throws Exception { JobConf job=new JobConf(); job.set(JobContext.TASK_ATTEMPT_ID,attempt); FileOutputFormat.setOutputPath(job,workDir.getParent().getParent()); FileOutputFormat.setWorkOutputPath(job,workDir); FileSystem fs=workDir.getFileSystem(job); if (!fs.mkdirs(workDir)) { fail("Failed to create output directory"); } String file="test_format.txt"; Reporter reporter=Reporter.NULL; TextOutputFormat theOutputFormat=new TextOutputFormat(); RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter); Text key1=new Text("key1"); Text key2=new Text("key2"); Text val1=new Text("val1"); Text val2=new Text("val2"); NullWritable nullWritable=NullWritable.get(); try { theRecordWriter.write(key1,val1); theRecordWriter.write(null,nullWritable); theRecordWriter.write(null,val1); theRecordWriter.write(nullWritable,val2); theRecordWriter.write(key2,nullWritable); theRecordWriter.write(key1,null); theRecordWriter.write(null,null); theRecordWriter.write(key2,val2); } finally { theRecordWriter.close(reporter); } File expectedFile=new File(new Path(workDir,file).toString()); StringBuffer expectedOutput=new StringBuffer(); expectedOutput.append(key1).append('\t').append(val1).append("\n"); expectedOutput.append(val1).append("\n"); expectedOutput.append(val2).append("\n"); expectedOutput.append(key2).append("\n"); expectedOutput.append(key1).append("\n"); expectedOutput.append(key2).append('\t').append(val2).append("\n"); String output=UtilsForTests.slurp(expectedFile); assertEquals(expectedOutput.toString(),output); }

BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests compressed TextOutputFormat output: records (including null and
 * NullWritable keys/values) are written with compression enabled, then the
 * file is read back through the codec's input stream and compared line by
 * line against the expected text.
 *
 * @throws IOException on failure
 */
@Test
public void testCompress() throws IOException {
  JobConf job = new JobConf();
  job.set(JobContext.TASK_ATTEMPT_ID, attempt);
  job.set(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS, "true");
  FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
  FileOutputFormat.setWorkOutputPath(job, workDir);
  FileSystem fs = workDir.getFileSystem(job);
  if (!fs.mkdirs(workDir)) {
    fail("Failed to create output directory");
  }
  String file = "test_compress.txt";
  Reporter reporter = Reporter.NULL;
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
      theOutputFormat.getRecordWriter(localFs, job, file, reporter);

  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();
  // Null/NullWritable keys or values suppress that side and the separator.
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }

  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append("\t").append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append("\t").append(val2).append("\n");

  // Read the compressed file back through the codec.
  DefaultCodec codec = new DefaultCodec();
  codec.setConf(job);
  Path expectedFile = new Path(workDir, file + codec.getDefaultExtension());
  final FileInputStream istream = new FileInputStream(expectedFile.toString());
  CompressionInputStream cistream = codec.createInputStream(istream);
  LineReader reader = new LineReader(cistream);
  // FIX: accumulate with StringBuilder instead of repeated String
  // concatenation in the read loop, and close the reader in a finally block
  // so an assertion/IO failure cannot leak the stream.
  StringBuilder output = new StringBuilder();
  Text out = new Text();
  try {
    while (reader.readLine(out) > 0) {
      output.append(out);
      output.append("\n");
    }
  } finally {
    reader.close();
  }
  assertEquals(expectedOutput.toString(), output.toString());
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Same null/NullWritable write matrix as testFormat, but with the output
 * separator overridden (to "\u0001" via
 * mapreduce.output.textoutputformat.separator); the custom separator must
 * appear between key and value exactly where both sides are present.
 */
@Test public void testFormatWithCustomSeparator() throws Exception { JobConf job=new JobConf(); String separator="\u0001"; job.set("mapreduce.output.textoutputformat.separator",separator); job.set(JobContext.TASK_ATTEMPT_ID,attempt); FileOutputFormat.setOutputPath(job,workDir.getParent().getParent()); FileOutputFormat.setWorkOutputPath(job,workDir); FileSystem fs=workDir.getFileSystem(job); if (!fs.mkdirs(workDir)) { fail("Failed to create output directory"); } String file="test_custom.txt"; Reporter reporter=Reporter.NULL; TextOutputFormat theOutputFormat=new TextOutputFormat(); RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter); Text key1=new Text("key1"); Text key2=new Text("key2"); Text val1=new Text("val1"); Text val2=new Text("val2"); NullWritable nullWritable=NullWritable.get(); try { theRecordWriter.write(key1,val1); theRecordWriter.write(null,nullWritable); theRecordWriter.write(null,val1); theRecordWriter.write(nullWritable,val2); theRecordWriter.write(key2,nullWritable); theRecordWriter.write(key1,null); theRecordWriter.write(null,null); theRecordWriter.write(key2,val2); } finally { theRecordWriter.close(reporter); } File expectedFile=new File(new Path(workDir,file).toString()); StringBuffer expectedOutput=new StringBuffer(); expectedOutput.append(key1).append(separator).append(val1).append("\n"); expectedOutput.append(val1).append("\n"); expectedOutput.append(val2).append("\n"); expectedOutput.append(key2).append("\n"); expectedOutput.append(key1).append("\n"); expectedOutput.append(key2).append(separator).append(val2).append("\n"); String output=UtilsForTests.slurp(expectedFile); assertEquals(expectedOutput.toString(),output); }

Class: org.apache.hadoop.mapred.TestYARNRunner

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the standard environment of the AM container built by
 * YARNRunner: LD_LIBRARY_PATH must combine the cross-platform PWD, the
 * admin lib path and the user lib path (in that order, joined with the
 * platform-appropriate classpath separator), and SHELL must come from the
 * admin user-shell setting.
 */
@Test public void testAMStandardEnv() throws Exception { final String ADMIN_LIB_PATH="foo"; final String USER_LIB_PATH="bar"; final String USER_SHELL="shell"; JobConf jobConf=new JobConf(); jobConf.set(MRJobConfig.MR_AM_ADMIN_USER_ENV,"LD_LIBRARY_PATH=" + ADMIN_LIB_PATH); jobConf.set(MRJobConfig.MR_AM_ENV,"LD_LIBRARY_PATH=" + USER_LIB_PATH); jobConf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL,USER_SHELL); YARNRunner yarnRunner=new YARNRunner(jobConf); ApplicationSubmissionContext appSubCtx=buildSubmitContext(yarnRunner,jobConf); ContainerLaunchContext clc=appSubCtx.getAMContainerSpec(); Map env=clc.getEnvironment(); String libPath=env.get(Environment.LD_LIBRARY_PATH.name()); assertNotNull("LD_LIBRARY_PATH not set",libPath); String cps=jobConf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM) ? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator; assertEquals("Bad AM LD_LIBRARY_PATH setting",MRApps.crossPlatformifyMREnv(conf,Environment.PWD) + cps + ADMIN_LIB_PATH+ cps+ USER_LIB_PATH,libPath); String shell=env.get(Environment.SHELL.name()); assertNotNull("SHELL not set",shell); assertEquals("Bad SHELL setting",USER_SHELL,shell); }

Class: org.apache.hadoop.mapred.gridmix.TestCompressionEmulationUtils

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that
 * {@link CompressionEmulationUtil#configureCompressionEmulation(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.mapred.JobConf)}
 * copies the compression-related configuration parameters (output compress
 * flag/codec/type and map-output compress flag/codec) from the source to
 * the target JobConf, and that input compression emulation is enabled in
 * the target once the source's input path carries a compressed extension
 * ("file.gz") — and not before.
 */
@Test public void testExtractCompressionConfigs(){ JobConf source=new JobConf(); JobConf target=new JobConf(); source.setBoolean(FileOutputFormat.COMPRESS,false); source.set(FileOutputFormat.COMPRESS_CODEC,"MyDefaultCodec"); source.set(FileOutputFormat.COMPRESS_TYPE,"MyDefaultType"); source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false); source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyDefaultCodec2"); CompressionEmulationUtil.configureCompressionEmulation(source,target); assertFalse(target.getBoolean(FileOutputFormat.COMPRESS,true)); assertEquals("MyDefaultCodec",target.get(FileOutputFormat.COMPRESS_CODEC)); assertEquals("MyDefaultType",target.get(FileOutputFormat.COMPRESS_TYPE)); assertFalse(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true)); assertEquals("MyDefaultCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC)); assertFalse(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target)); source.setBoolean(FileOutputFormat.COMPRESS,true); source.set(FileOutputFormat.COMPRESS_CODEC,"MyCodec"); source.set(FileOutputFormat.COMPRESS_TYPE,"MyType"); source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true); source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyCodec2"); org.apache.hadoop.mapred.FileInputFormat.setInputPaths(source,"file.gz"); target=new JobConf(); CompressionEmulationUtil.configureCompressionEmulation(source,target); assertTrue(target.getBoolean(FileOutputFormat.COMPRESS,false)); assertEquals("MyCodec",target.get(FileOutputFormat.COMPRESS_CODEC)); assertEquals("MyType",target.get(FileOutputFormat.COMPRESS_TYPE)); assertTrue(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false)); assertEquals("MyCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC)); 
assertTrue(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests a compressible {@link GridmixRecord}: a 10 MB record marked
 * compressible with a target ratio must round-trip through a gzip-compressed
 * file unchanged in size, compress to less than its raw size, and land
 * within 1 of the standardized target ratio.
 *
 * @throws IOException on failure
 */
@Test
public void testCompressibleGridmixRecord() throws IOException {
  JobConf conf = new JobConf();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);

  FileSystem lfs = FileSystem.getLocal(conf);
  int dataSize = 1024 * 1024 * 10; // 10 MB
  float ratio = 0.357F;

  // Scratch dir for the compressed record file.
  Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"))
      .makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
  Path tempDir = new Path(rootTempDir, "TestPossiblyCompressibleGridmixRecord");
  lfs.delete(tempDir, true);

  GridmixRecord record = new GridmixRecord(dataSize, 0);
  record.setCompressibility(true, ratio);

  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class,
      CompressionCodec.class);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);

  // Write the record through the (possibly) compressed output stream.
  Path recordFile = new Path(tempDir, "record");
  OutputStream outStream =
      CompressionEmulationUtil.getPossiblyCompressedOutputStream(recordFile, conf);
  DataOutputStream out = new DataOutputStream(outStream);
  record.write(out);
  out.close();
  outStream.close();

  // Read it back through the decompressing input stream.
  Path actualRecordFile = recordFile.suffix(".gz");
  InputStream in = CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
  long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
  GridmixRecord recordRead = new GridmixRecord();
  // FIX: close the input stream when done; it was previously leaked.
  try {
    recordRead.readFields(new DataInputStream(in));
  } finally {
    in.close();
  }
  assertEquals("Record size mismatch in a compressible GridmixRecord",
      dataSize, recordRead.getSize());
  assertTrue("Failed to generate a compressible GridmixRecord",
      recordRead.getSize() > compressedFileSize);
  // The observed on-disk ratio should match the target once standardized.
  float seenRatio = ((float) compressedFileSize) / dataSize;
  assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),
      CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}

Class: org.apache.hadoop.mapred.gridmix.TestDistCacheEmulation

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs the GenerateDistCacheData job end-to-end: it must be map-only
 * (zero reduce tasks), complete successfully, and produce distributed-cache
 * files matching the expected sizes.
 *
 * @throws Exception on setup or job failure
 */
@Test(timeout=200000)
public void testGenerateDistCacheData() throws Exception {
  final long[] sortedFileSizes = new long[5];
  final Configuration jobConf = runSetupGenerateDistCacheData(true, sortedFileSizes);
  final GridmixJob dcDataJob = new GenerateDistCacheData(jobConf);
  final Job job = dcDataJob.call();
  assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",
      0, job.getNumReduceTasks());
  assertTrue("GenerateDistCacheData job failed.", job.waitForCompletion(false));
  validateDistCacheData(jobConf, sortedFileSizes);
}

Class: org.apache.hadoop.mapred.gridmix.TestFilePool

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises FilePool with and without a minimum-file-size threshold: total
 * pool size and file count with a 3KB minimum, exact-size and random-size
 * requests, and the full pool size once the minimum is removed.
 */
@Test
public void testPool() throws Exception {
  final Random rng = new Random();
  final Configuration conf = new Configuration();
  conf.setLong(FilePool.GRIDMIX_MIN_FILE, 3 * 1024);
  final FilePool pool = new FilePool(conf, base);
  pool.refresh();

  final ArrayList files = new ArrayList();
  // Pool size with the 3KB minimum applied (the -6 KB / -4 files account
  // for the entries that fall below the threshold).
  final int expectedPoolSize = (NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
  assertEquals(expectedPoolSize, pool.getInputFiles(Long.MAX_VALUE, files));
  assertEquals(NFILES - 4, files.size());

  // Asking for exactly the pool size returns exactly the pool size.
  files.clear();
  assertEquals(expectedPoolSize, pool.getInputFiles(expectedPoolSize, files));

  // A random request may overshoot by at most the largest file.
  files.clear();
  final long want = rng.nextInt(expectedPoolSize);
  assertTrue("Missed: " + want,
      (NFILES / 2) * 1024 > want - pool.getInputFiles(want, files));

  // With no minimum size, every file participates in the pool.
  conf.setLong(FilePool.GRIDMIX_MIN_FILE, 0);
  pool.refresh();
  files.clear();
  assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,
      pool.getInputFiles(Long.MAX_VALUE, files));
}

Class: org.apache.hadoop.mapred.gridmix.TestGridMixClasses

APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the SerialJobFactory reader thread against a two-job trace
 * (wordcount2.json): before the start latch is released no jobs may be
 * submitted; after releasing it, the factory is updated until the reader
 * thread exits, by which point both trace jobs must have been submitted.
 * NOTE(review): relies on 1 s sleeps to observe the thread — timing
 * sensitive, so the code is left byte-identical.
 */
@Test(timeout=120000) public void testSerialReaderThread() throws Exception { Configuration conf=new Configuration(); File fin=new File("src" + File.separator + "test"+ File.separator+ "resources"+ File.separator+ "data"+ File.separator+ "wordcount2.json"); JobStoryProducer jobProducer=new ZombieJobProducer(new Path(fin.getAbsolutePath()),null,conf); CountDownLatch startFlag=new CountDownLatch(1); UserResolver resolver=new SubmitterUserResolver(); FakeJobSubmitter submitter=new FakeJobSubmitter(); File ws=new File("target" + File.separator + this.getClass().getName()); if (!ws.exists()) { Assert.assertTrue(ws.mkdirs()); } SerialJobFactory jobFactory=new SerialJobFactory(submitter,jobProducer,new Path(ws.getAbsolutePath()),conf,startFlag,resolver); Path ioPath=new Path(ws.getAbsolutePath()); jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf,ioPath)); Thread test=jobFactory.createReaderThread(); test.start(); Thread.sleep(1000); assertEquals(0,submitter.getJobs().size()); startFlag.countDown(); while (test.isAlive()) { Thread.sleep(1000); jobFactory.update(null); } assertEquals(2,submitter.getJobs().size()); }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests SleepReducer through a wrapped reduce context: setup() must set the
 * "Sleeping..." status and block for at least the current key's
 * reduce-output-bytes worth of milliseconds, and cleanup() must set the
 * "Slept for" status.
 * NOTE(review): asserts on wall-clock elapsed time under a 3 s timeout —
 * timing sensitive, so the code is left byte-identical.
 */
@Test(timeout=3000) public void testSleepReducer() throws Exception { Configuration conf=new Configuration(); conf.setInt(JobContext.NUM_REDUCES,2); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); conf.setBoolean(FileOutputFormat.COMPRESS,true); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true); TaskAttemptID taskId=new TaskAttemptID(); RawKeyValueIterator input=new FakeRawKeyValueReducerIterator(); Counter counter=new GenericCounter(); Counter inputValueCounter=new GenericCounter(); RecordWriter output=new LoadRecordReduceWriter(); OutputCommitter committer=new CustomOutputCommitter(); StatusReporter reporter=new DummyReporter(); RawComparator comparator=new FakeRawComparator(); ReduceContext reducecontext=new ReduceContextImpl(conf,taskId,input,counter,inputValueCounter,output,committer,reporter,comparator,GridmixKey.class,NullWritable.class); org.apache.hadoop.mapreduce.Reducer.Context context=new WrappedReducer().getReducerContext(reducecontext); SleepReducer test=new SleepReducer(); long start=System.currentTimeMillis(); test.setup(context); long sleeper=context.getCurrentKey().getReduceOutputBytes(); assertEquals("Sleeping... " + sleeper + " ms left",context.getStatus()); assertTrue(System.currentTimeMillis() >= (start + sleeper)); test.cleanup(context); assertEquals("Slept for " + sleeper,context.getStatus()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests GridmixJob equality and ordering: jobs built from the same trace
 * description are equal exactly when their sequence numbers match, and a
 * lower sequence number compares as smaller.
 */
@Test(timeout=30000)
public void testCompareGridmixJob() throws Exception {
  final Configuration conf = new Configuration();
  final Path outRoot = new Path("target");
  final JobStory jobDesc = mock(JobStory.class);
  when(jobDesc.getName()).thenReturn("JobName");
  when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

  // Identical jobs except for the trailing sequence number.
  final GridmixJob seqZeroA = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  final GridmixJob seqZeroB = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  final GridmixJob seqOneA = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
  final GridmixJob seqOneB = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);

  assertTrue(seqZeroA.equals(seqZeroB));
  assertEquals(0, seqZeroA.compareTo(seqZeroB));
  assertFalse(seqZeroA.equals(seqOneA));
  assertEquals(-1, seqZeroA.compareTo(seqOneA));
  assertEquals(-1, seqZeroA.compareTo(seqOneB));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests SleepJob.SleepMapper through a wrapped map context: mapping a
 * key/value timestamp 2 s in the future must block until at least that
 * time, and after cleanup() exactly one record has been written.
 * NOTE(review): asserts on wall-clock elapsed time — timing sensitive, so
 * the code is left byte-identical.
 */
@SuppressWarnings({"unchecked","rawtypes"}) @Test(timeout=30000) public void testSleepMapper() throws Exception { SleepJob.SleepMapper test=new SleepJob.SleepMapper(); Configuration conf=new Configuration(); conf.setInt(JobContext.NUM_REDUCES,2); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true); TaskAttemptID taskId=new TaskAttemptID(); FakeRecordLLReader reader=new FakeRecordLLReader(); LoadRecordGkNullWriter writer=new LoadRecordGkNullWriter(); OutputCommitter committer=new CustomOutputCommitter(); StatusReporter reporter=new TaskAttemptContextImpl.DummyReporter(); SleepSplit split=getSleepSplit(); MapContext mapcontext=new MapContextImpl(conf,taskId,reader,writer,committer,reporter,split); Context context=new WrappedMapper().getMapContext(mapcontext); long start=System.currentTimeMillis(); LOG.info("start:" + start); LongWritable key=new LongWritable(start + 2000); LongWritable value=new LongWritable(start + 2000); test.map(key,value,context); LOG.info("finish:" + System.currentTimeMillis()); assertTrue(System.currentTimeMillis() >= (start + 2000)); test.cleanup(context); assertEquals(1,writer.getData().size()); }

Class: org.apache.hadoop.mapred.gridmix.TestGridmixMemoryEmulation

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@link TotalHeapUsageEmulatorPlugin} via fake core/progress objects:
 * a plugin initialized with zero target usage must be a no-op (no extra
 * core calls, unchanged heap usage, progress 1.0); initialization with a
 * target beyond the configured physical maximum must fail; emulation
 * accuracy is then checked under several load-ratio / min-free-ratio
 * settings; and finally the progress-interval boundaries (0%, 24%, 25%,
 * 80%, 100%) must trigger emulation steps exactly at the configured 25%
 * interval.
 * NOTE(review): each boundary check depends on the cumulative state left by
 * the previous one, so the code is left byte-identical.
 */
@Test public void testTotalHeapUsageEmulatorPlugin() throws Exception { Configuration conf=new Configuration(); ResourceCalculatorPlugin monitor=new DummyResourceCalculatorPlugin(); long maxHeapUsage=1024 * TotalHeapUsageEmulatorPlugin.ONE_MB; conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,maxHeapUsage); monitor.setConf(conf); conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F); long targetHeapUsageInMB=200; FakeProgressive fakeProgress=new FakeProgressive(); FakeHeapUsageEmulatorCore fakeCore=new FakeHeapUsageEmulatorCore(); FakeHeapUsageEmulatorPlugin heapPlugin=new FakeHeapUsageEmulatorPlugin(fakeCore); ResourceUsageMetrics invalidUsage=TestResourceUsageEmulators.createMetrics(0); heapPlugin.initialize(conf,invalidUsage,null,null); int numCallsPre=fakeCore.getNumCalls(); long heapUsagePre=fakeCore.getHeapUsageInMB(); heapPlugin.emulate(); int numCallsPost=fakeCore.getNumCalls(); long heapUsagePost=fakeCore.getHeapUsageInMB(); assertEquals("Disabled heap usage emulation plugin works!",numCallsPre,numCallsPost); assertEquals("Disabled heap usage emulation plugin works!",heapUsagePre,heapUsagePost); float progress=heapPlugin.getProgress(); assertEquals("Invalid progress of disabled cumulative heap usage emulation " + "plugin!",1.0f,progress,0f); Boolean failed=null; invalidUsage=TestResourceUsageEmulators.createMetrics(maxHeapUsage + TotalHeapUsageEmulatorPlugin.ONE_MB); try { heapPlugin.initialize(conf,invalidUsage,monitor,null); failed=false; } catch ( Exception e) { failed=true; } assertNotNull("Fail case failure!",failed); assertTrue("Expected failure!",failed); ResourceUsageMetrics metrics=TestResourceUsageEmulators.createMetrics(targetHeapUsageInMB * TotalHeapUsageEmulatorPlugin.ONE_MB); testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10); 
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.2F); testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,5); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F); conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.5F); testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,120,2); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F); conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F); testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10); conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.25F); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F); testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,162,6); fakeProgress=new FakeProgressive(); conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F); conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.25F); heapPlugin.initialize(conf,metrics,monitor,fakeProgress); fakeCore.resetFake(); long initHeapUsage=fakeCore.getHeapUsageInMB(); long initNumCallsUsage=fakeCore.getNumCalls(); testEmulationBoundary(0F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 0 progress]"); testEmulationBoundary(0.24F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 24% progress]"); testEmulationBoundary(0.25F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB / 4,1,"[op, 25% progress]"); testEmulationBoundary(0.80F,fakeCore,fakeProgress,heapPlugin,(targetHeapUsageInMB * 4) / 5,2,"[op, 80% progress]"); testEmulationBoundary(1F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB,3,"[op, 100% progress]"); }

Class: org.apache.hadoop.mapred.gridmix.TestGridmixSubmission

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs {@code DebugGridmix.main} with no arguments and verifies that it
 * exits (via {@link ExitUtil}) after printing the usage text to stderr.
 *
 * @throws Exception on unexpected failure
 */
@Test(timeout=100000) public void testMain() throws Exception {
  SecurityManager securityManager = System.getSecurityManager();
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  // BUG FIX: the original saved System.out into oldOut but then restored it
  // with System.setErr(oldOut), permanently redirecting stderr to the stdout
  // stream and losing the original stderr. Save the stream we replace.
  final PrintStream oldErr = System.err;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);
  } catch (ExitUtil.ExitException e) {
    // Gridmix exits via ExitUtil when invoked without arguments.
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  // The usage text was captured from stderr; check its key fragments.
  String print = bytes.toString();
  assertTrue(print.contains("Usage: gridmix [-generate ] [-users URI] [-Dname=value ...] "));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}

Class: org.apache.hadoop.mapred.gridmix.TestGridmixSummary

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link DataStatistics}.
 */
@Test public void testDataStatistics() throws Exception {
  // Plain value-holder checks on DataStatistics(size, numFiles, compressed).
  DataStatistics stats=new DataStatistics(10,2,true);
  assertEquals("Data size mismatch",10,stats.getDataSize());
  assertEquals("Num files mismatch",2,stats.getNumFiles());
  assertTrue("Compression configuration mismatch",stats.isDataCompressed());
  stats=new DataStatistics(100,5,false);
  assertEquals("Data size mismatch",100,stats.getDataSize());
  assertEquals("Num files mismatch",5,stats.getNumFiles());
  assertFalse("Compression configuration mismatch",stats.isDataCompressed());
  // Fresh empty input dir under the test scratch space.
  Configuration conf=new Configuration();
  Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp"));
  Path testDir=new Path(rootTempDir,"testDataStatistics");
  FileSystem fs=testDir.getFileSystem(conf);
  fs.delete(testDir,true);
  Path testInputDir=new Path(testDir,"test");
  fs.mkdirs(testInputDir);
  // With compression emulation enabled, publishing stats over this (empty,
  // uncompressed) input must throw.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
  Boolean failed=null;
  try {
    GenerateData.publishDataStatistics(testInputDir,1024L,conf);
    failed=false;
  } catch ( RuntimeException e) {
    failed=true;
  }
  assertNotNull("Expected failure!",failed);
  assertTrue("Compression data publishing error",failed);
  // Without compression emulation, an empty dir publishes zeroed stats.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
  stats=GenerateData.publishDataStatistics(testInputDir,1024L,conf);
  assertEquals("Data size mismatch",0,stats.getDataSize());
  assertEquals("Num files mismatch",0,stats.getNumFiles());
  assertFalse("Compression configuration mismatch",stats.isDataCompressed());
  // One plain file: stats reflect its size, a single file, not compressed.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
  Path inputDataFile=new Path(testInputDir,"test");
  long size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello bye").size();
  stats=GenerateData.publishDataStatistics(testInputDir,-1,conf);
  assertEquals("Data size mismatch",size,stats.getDataSize());
  assertEquals("Num files mismatch",1,stats.getNumFiles());
  assertFalse("Compression configuration mismatch",stats.isDataCompressed());
  // Compression emulation on with the plain file still present must throw.
  // NOTE(review): presumably because the input is not in a compressed
  // format -- confirm against GenerateData.publishDataStatistics.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
  failed=null;
  try {
    GenerateData.publishDataStatistics(testInputDir,1234L,conf);
    failed=false;
  } catch ( RuntimeException e) {
    failed=true;
  }
  assertNotNull("Expected failure!",failed);
  assertTrue("Compression data publishing error",failed);
  // Replace the plain file with a *.gz file: publishing works with
  // compression emulation off...
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
  fs.delete(inputDataFile,false);
  inputDataFile=new Path(testInputDir,"test.gz");
  size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello").size();
  stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
  assertEquals("Data size mismatch",size,stats.getDataSize());
  assertEquals("Num files mismatch",1,stats.getNumFiles());
  assertFalse("Compression configuration mismatch",stats.isDataCompressed());
  // ...and with it on, in which case the data is reported as compressed.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
  stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
  assertEquals("Data size mismatch",size,stats.getDataSize());
  assertEquals("Num files mismatch",1,stats.getNumFiles());
  assertTrue("Compression configuration mismatch",stats.isDataCompressed());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test {@link ExecutionSummarizer}. */ @Test @SuppressWarnings({"unchecked","rawtypes"}) public void testExecutionSummarizer() throws IOException { Configuration conf=new Configuration(); ExecutionSummarizer es=new ExecutionSummarizer(); assertEquals("ExecutionSummarizer init failed",Summarizer.NA,es.getCommandLineArgsString()); long startTime=System.currentTimeMillis(); String[] initArgs=new String[]{"-Xmx20m","-Dtest.args='test'"}; es=new ExecutionSummarizer(initArgs); assertEquals("ExecutionSummarizer init failed","-Xmx20m -Dtest.args='test'",es.getCommandLineArgsString()); assertTrue("Start time mismatch",es.getStartTime() >= startTime); assertTrue("Start time mismatch",es.getStartTime() <= System.currentTimeMillis()); es.update(null); assertEquals("ExecutionSummarizer init failed",0,es.getSimulationStartTime()); testExecutionSummarizer(0,0,0,0,0,0,0,es); long simStartTime=System.currentTimeMillis(); es.start(null); assertTrue("Simulation start time mismatch",es.getSimulationStartTime() >= simStartTime); assertTrue("Simulation start time mismatch",es.getSimulationStartTime() <= System.currentTimeMillis()); JobStats stats=generateFakeJobStats(1,10,true,false); es.update(stats); testExecutionSummarizer(1,10,0,1,1,0,0,es); stats=generateFakeJobStats(5,1,false,false); es.update(stats); testExecutionSummarizer(6,11,0,2,1,1,0,es); stats=generateFakeJobStats(1,1,true,true); es.update(stats); testExecutionSummarizer(7,12,0,3,1,1,1,es); stats=generateFakeJobStats(2,2,false,true); es.update(stats); testExecutionSummarizer(9,14,0,4,1,1,2,es); JobFactory factory=new FakeJobFactory(conf); factory.numJobsInTrace=3; Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")); Path testDir=new Path(rootTempDir,"testGridmixSummary"); Path testTraceFile=new Path(testDir,"test-trace.json"); FileSystem fs=FileSystem.getLocal(conf); fs.create(testTraceFile).close(); UserResolver resolver=new RoundRobinUserResolver(); DataStatistics dataStats=new 
DataStatistics(100,2,true); String policy=GridmixJobSubmissionPolicy.REPLAY.name(); conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY,policy); es.finalize(factory,testTraceFile.toString(),1024L,resolver,dataStats,conf); assertEquals("Mismtach in num jobs in trace",3,es.getNumJobsInTrace()); String tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature()); Path qPath=fs.makeQualified(testTraceFile); assertEquals("Mismatch in trace filename",qPath.toString(),es.getInputTraceLocation()); assertEquals("Mismatch in expected data size","1 K",es.getExpectedDataSize()); assertEquals("Mismatch in input data statistics",ExecutionSummarizer.stringifyDataStatistics(dataStats),es.getInputDataStatistics()); assertEquals("Mismatch in user resolver",resolver.getClass().getName(),es.getUserResolver()); assertEquals("Mismatch in policy",policy,es.getJobSubmissionPolicy()); es.finalize(factory,testTraceFile.toString(),1024 * 1024 * 1024* 10L,resolver,dataStats,conf); assertEquals("Mismatch in expected data size","10 G",es.getExpectedDataSize()); fs.delete(testTraceFile,false); try { Thread.sleep(1000); } catch ( InterruptedException ie) { } fs.create(testTraceFile).close(); es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf); assertEquals("Mismatch in trace data size",Summarizer.NA,es.getExpectedDataSize()); assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature())); tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature()); testTraceFile=new Path(testDir,"test-trace2.json"); fs.create(testTraceFile).close(); es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf); assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature())); tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in 
trace signature",tid,es.getInputTraceSignature()); es.finalize(factory,"-",0L,resolver,dataStats,conf); assertEquals("Mismatch in trace signature",Summarizer.NA,es.getInputTraceSignature()); assertEquals("Mismatch in trace file location",Summarizer.NA,es.getInputTraceLocation()); }

Class: org.apache.hadoop.mapred.gridmix.TestHighRamJob

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Tests high ram job properties configuration.
 */
@SuppressWarnings("deprecation") @Test public void testHighRamFeatureEmulation() throws IOException {
  // Emulation disabled: the framework defaults must win over trace values.
  Configuration gridmixConf = new Configuration();
  gridmixConf.setBoolean(GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE, false);
  testHighRamConfig(10, 20, 5, 10, MRJobConfig.DEFAULT_MAP_MEMORY_MB,
      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB,
      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, gridmixConf);
  // Emulation enabled with the (deprecated) per-task VMEM upper limit.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, 20 * 1024 * 1024);
  testHighRamConfig(10, 20, 5, 10, 5, 10, 10, 20, gridmixConf);
  // Emulation enabled with cluster-level map/reduce memory maximums.
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 100);
  gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 300);
  testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
  // Limit-violation cases: each configuration below must make the high-ram
  // scaling fail. The repeated try/catch/Boolean pattern from the original
  // is factored into assertHighRamConfigRejected().
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, 70 * 1024 * 1024);
  assertHighRamConfigRejected(gridmixConf,
      "Exception expected for exceeding map memory limit (deprecation)!");
  gridmixConf = new Configuration();
  gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, 150 * 1024 * 1024);
  assertHighRamConfigRejected(gridmixConf,
      "Exception expected for exceeding reduce memory limit (deprecation)!");
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 70);
  assertHighRamConfigRejected(gridmixConf,
      "Exception expected for exceeding map memory limit!");
  gridmixConf = new Configuration();
  gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 200);
  assertHighRamConfigRejected(gridmixConf,
      "Exception expected for exceeding reduce memory limit!");
}

/**
 * Runs the standard high-ram scaling scenario with {@code conf} and asserts
 * that it throws; {@code message} is the failure text used when it does not.
 */
private void assertHighRamConfigRejected(Configuration conf, String message) {
  boolean failed = false;
  try {
    testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, conf);
  } catch (Exception e) {
    failed = true;
  }
  assertTrue(message, failed);
}

Class: org.apache.hadoop.mapred.gridmix.TestResourceUsageEmulators

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link LoadJob.ResourceUsageMatcherRunner}.
 */
@Test @SuppressWarnings("unchecked") public void testResourceUsageMatcherRunner() throws Exception {
  Configuration conf=new Configuration();
  FakeProgressive progress=new FakeProgressive();
  // Wire a dummy resource calculator and the test emulator plugin into conf.
  conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,DummyResourceCalculatorPlugin.class,ResourceCalculatorPlugin.class);
  conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class);
  long currentTime=System.currentTimeMillis();
  // Minimal task context for constructing the matcher runner.
  TaskAttemptID id=new TaskAttemptID("test",1,TaskType.MAP,1,1);
  StatusReporter reporter=new DummyReporter(progress);
  TaskInputOutputContext context=new MapContextImpl(conf,id,null,null,null,reporter,null);
  FakeResourceUsageMatcherRunner matcher=new FakeResourceUsageMatcherRunner(context,null);
  // Constructing the runner initializes the configured plugin; the plugin
  // records its init timestamp, which must be later than currentTime.
  String identifier=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
  long initTime=TestResourceUsageEmulatorPlugin.testInitialization(identifier,conf);
  assertTrue("ResourceUsageMatcherRunner failed to initialize the" + " configured plugin",initTime > currentTime);
  assertEquals("Progress mismatch in ResourceUsageMatcherRunner",0,progress.getProgress(),0D);
  // Nudge progress and run the matcher; the plugin must then record an
  // emulation timestamp after this point.
  progress.setProgress(0.01f);
  currentTime=System.currentTimeMillis();
  matcher.test();
  long emulateTime=TestResourceUsageEmulatorPlugin.testEmulation(identifier,conf);
  assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate" + " the configured plugin",emulateTime > currentTime);
}

Class: org.apache.hadoop.mapred.gridmix.TestUserResolve

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testSubmitterResolver() throws Exception {
  // The submitter resolver always maps jobs to the user running Gridmix,
  // so it never needs an explicit target-users list.
  final UserResolver resolver = new SubmitterUserResolver();
  assertFalse(resolver.needsTargetUsersList());
  // Resolving any (even null) UGI must yield the current user's UGI.
  UserGroupInformation current = UserGroupInformation.getCurrentUser();
  assertEquals(current, resolver.getTargetUgi((UserGroupInformation) null));
}

Class: org.apache.hadoop.mapred.jobcontrol.TestJobControl

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testGetAssignedJobId() throws Exception {
  JobConf conf = new JobConf();
  Job job = new Job(conf);
  // No mapreduce job attached yet, so there is no assigned id.
  assertNull(job.getAssignedJobID());
  // Attach a mocked mapreduce job carrying a known JobID.
  org.apache.hadoop.mapreduce.Job mocked = mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID mockedId = new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mocked.getJobID()).thenReturn(mockedId);
  job.setJob(mocked);
  // The assigned id must be the mapred-side view of the same JobID.
  JobID expected = new JobID("test", 0);
  assertEquals(expected, job.getAssignedJobID());
  verify(mocked).getJobID();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testAddingDependingJob() throws Exception {
  Job job = getCopyJob();
  ArrayList dependingJobs = new ArrayList();
  JobControl control = new JobControl("Test");
  control.addJob(job);
  // A freshly added job sits in WAITING state...
  Assert.assertEquals(Job.WAITING, job.getState());
  // ...and while WAITING it must still accept new dependencies.
  Assert.assertTrue(job.addDependingJob(new Job(job.getJobConf(), dependingJobs)));
}

Class: org.apache.hadoop.mapred.lib.db.TestDBInputFormat

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that {@code DBInputFormat.setInput} and
 * {@code DBConfiguration.configureDB} populate the expected
 * {@code DBConfiguration.*} job properties.
 */
@Test(timeout=5000) public void testSetInput(){
  JobConf configuration=new JobConf();
  // Table-based variant: input class, table, conditions, order-by, fields.
  String[] fieldNames={"field1","field2"};
  DBInputFormat.setInput(configuration,NullDBWritable.class,"table","conditions","orderBy",fieldNames);
  assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",configuration.getClass(DBConfiguration.INPUT_CLASS_PROPERTY,null).getName());
  assertEquals("table",configuration.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY,null));
  String[] fields=configuration.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
  assertEquals("field1",fields[0]);
  assertEquals("field2",fields[1]);
  assertEquals("conditions",configuration.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY,null));
  assertEquals("orderBy",configuration.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY,null));
  // Query-based variant: explicit input query and count query.
  configuration=new JobConf();
  DBInputFormat.setInput(configuration,NullDBWritable.class,"query","countQuery");
  assertEquals("query",configuration.get(DBConfiguration.INPUT_QUERY,null));
  assertEquals("countQuery",configuration.get(DBConfiguration.INPUT_COUNT_QUERY,null));
  // configureDB with credentials stores driver, URL, user and password.
  JobConf jConfiguration=new JobConf();
  DBConfiguration.configureDB(jConfiguration,"driverClass","dbUrl","user","password");
  assertEquals("driverClass",jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
  assertEquals("dbUrl",jConfiguration.get(DBConfiguration.URL_PROPERTY));
  assertEquals("user",jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
  assertEquals("password",jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
  // configureDB without credentials leaves user/password unset.
  jConfiguration=new JobConf();
  DBConfiguration.configureDB(jConfiguration,"driverClass","dbUrl");
  assertEquals("driverClass",jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
  assertEquals("dbUrl",jConfiguration.get(DBConfiguration.URL_PROPERTY));
  assertNull(jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
  assertNull(jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@code DBRecordReader}: it should create keys and values of the
 * expected types and report its position.
 */
@SuppressWarnings("unchecked") @Test(timeout=5000) public void testDBRecordReader() throws Exception {
  JobConf job=mock(JobConf.class);
  DBConfiguration dbConfig=mock(DBConfiguration.class);
  String[] fields={"field1","filed2"};
  // Reader built over a fake JDBC connection supplied by DriverForTest.
  @SuppressWarnings("rawtypes") DBRecordReader reader=new DBInputFormat().new DBRecordReader(new DBInputSplit(),NullDBWritable.class,job,DriverForTest.getConnection(),dbConfig,"condition",fields,"table");
  // Keys start at 0 and values are NullDBWritable instances.
  LongWritable key=reader.createKey();
  assertEquals(0,key.get());
  DBWritable value=reader.createValue();
  assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",value.getClass().getName());
  assertEquals(0,reader.getPos());
  // NOTE(review): next() returning false suggests DriverForTest's fake
  // connection yields no rows -- confirm against DriverForTest.
  assertFalse(reader.next(key,value));
}

Class: org.apache.hadoop.mapred.pipes.TestPipeApplication

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * test org.apache.hadoop.mapred.pipes.Submitter
 * @throws Exception
 */
@Test public void testSubmitter() throws Exception {
  JobConf conf=new JobConf();
  File[] psw=cleanTokenPasswordFile();
  System.setProperty("test.build.data","target/tmp/build/TEST_SUBMITTER_MAPPER/data");
  conf.set("hadoop.log.dir","target/tmp");
  // Route everything through the pipes executable: no Java mapper, reducer,
  // record reader or record writer.
  Submitter.setIsJavaMapper(conf,false);
  Submitter.setIsJavaReducer(conf,false);
  Submitter.setKeepCommandFile(conf,false);
  Submitter.setIsJavaRecordReader(conf,false);
  Submitter.setIsJavaRecordWriter(conf,false);
  PipesPartitioner partitioner=new PipesPartitioner();
  partitioner.configure(conf);
  Submitter.setJavaPartitioner(conf,partitioner.getClass());
  assertEquals(PipesPartitioner.class,(Submitter.getJavaPartitioner(conf)));
  // First run: no arguments. Submitter prints its usage text to stdout and
  // exits; capture stdout and verify the usage fragments.
  SecurityManager securityManager=System.getSecurityManager();
  PrintStream oldps=System.out;
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  ExitUtil.disableSystemExit();
  try {
    System.setOut(new PrintStream(out));
    Submitter.main(new String[0]);
    fail();
  } catch ( ExitUtil.ExitException e) {
    // NOTE(review): contains("") is vacuously true for any string, so this
    // first assertion verifies nothing -- likely a leftover; confirm.
    assertTrue(out.toString().contains(""));
    assertTrue(out.toString().contains("bin/hadoop pipes"));
    assertTrue(out.toString().contains("[-input ] // Input directory"));
    assertTrue(out.toString().contains("[-output ] // Output directory"));
    assertTrue(out.toString().contains("[-jar // jar filename"));
    assertTrue(out.toString().contains("[-inputformat ] // InputFormat class"));
    assertTrue(out.toString().contains("[-map ] // Java Map class"));
    assertTrue(out.toString().contains("[-partitioner ] // Java Partitioner"));
    assertTrue(out.toString().contains("[-reduce ] // Java Reduce class"));
    assertTrue(out.toString().contains("[-writer ] // Java RecordWriter"));
    assertTrue(out.toString().contains("[-program ] // executable URI"));
    assertTrue(out.toString().contains("[-reduces ] // number of reduces"));
    assertTrue(out.toString().contains("[-lazyOutput ] // createOutputLazily"));
    assertTrue(out.toString().contains("-conf specify an application configuration file"));
    assertTrue(out.toString().contains("-D use value for given property"));
    assertTrue(out.toString().contains("-fs specify a namenode"));
    assertTrue(out.toString().contains("-jt specify a job tracker"));
    assertTrue(out.toString().contains("-files specify comma separated files to be copied to the map reduce cluster"));
    assertTrue(out.toString().contains("-libjars specify comma separated jar files to include in the classpath."));
    assertTrue(out.toString().contains("-archives specify comma separated archives to be unarchived on the compute machines."));
  } finally {
    System.setOut(oldps);
    System.setSecurityManager(securityManager);
    if (psw != null) {
      // remove password files
      for ( File file : psw) {
        file.deleteOnExit();
      }
    }
  }
  // Second run: a complete argument set. Submitter should run the job and
  // exit with status 0 (still via ExitUtil).
  try {
    File fCommand=getFileCommand(null);
    String[] args=new String[22];
    File input=new File(workSpace + File.separator + "input");
    if (!input.exists()) {
      Assert.assertTrue(input.createNewFile());
    }
    File outPut=new File(workSpace + File.separator + "output");
    FileUtil.fullyDelete(outPut);
    args[0]="-input";
    args[1]=input.getAbsolutePath();
    args[2]="-output";
    args[3]=outPut.getAbsolutePath();
    args[4]="-inputformat";
    args[5]="org.apache.hadoop.mapred.TextInputFormat";
    args[6]="-map";
    args[7]="org.apache.hadoop.mapred.lib.IdentityMapper";
    args[8]="-partitioner";
    args[9]="org.apache.hadoop.mapred.pipes.PipesPartitioner";
    args[10]="-reduce";
    args[11]="org.apache.hadoop.mapred.lib.IdentityReducer";
    args[12]="-writer";
    args[13]="org.apache.hadoop.mapred.TextOutputFormat";
    args[14]="-program";
    args[15]=fCommand.getAbsolutePath();
    args[16]="-reduces";
    args[17]="2";
    args[18]="-lazyOutput";
    args[19]="lazyOutput";
    args[20]="-jobconf";
    args[21]="mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false";
    Submitter.main(args);
    fail();
  } catch ( ExitUtil.ExitException e) {
    // NOTE(review): assertEquals arguments appear swapped (JUnit convention
    // is (expected, actual)); harmless for equality but confirm intent.
    assertEquals(e.status,0);
  } finally {
    System.setOut(oldps);
    System.setSecurityManager(securityManager);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * test org.apache.hadoop.mapred.pipes.Application * test a internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS... * @throws Throwable */ @Test public void testApplication() throws Throwable { JobConf conf=new JobConf(); RecordReader rReader=new Reader(); File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub"); TestTaskReporter reporter=new TestTaskReporter(); File[] psw=cleanTokenPasswordFile(); try { conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName); conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath()); Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service")); TokenCache.setJobToken(token,conf.getCredentials()); FakeCollector output=new FakeCollector(new Counters.Counter(),new Progress()); FileSystem fs=new RawLocalFileSystem(); fs.setConf(conf); Writer wr=new Writer(conf,fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true); output.setWriter(wr); conf.set(Submitter.PRESERVE_COMMANDFILE,"true"); initStdOut(conf); Application,Writable,IntWritable,Text> application=new Application,Writable,IntWritable,Text>(conf,rReader,output,reporter,IntWritable.class,Text.class); application.getDownlink().flush(); application.getDownlink().mapItem(new IntWritable(3),new Text("txt")); application.getDownlink().flush(); application.waitForFinish(); wr.close(); String stdOut=readStdOut(conf); assertTrue(stdOut.contains("key:3")); assertTrue(stdOut.contains("value:txt")); assertEquals(1.0,reporter.getProgress(),0.01); assertNotNull(reporter.getCounter("group","name")); assertEquals(reporter.getStatus(),"PROGRESS"); stdOut=readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile")); assertEquals(0.55f,rReader.getProgress(),0.001); application.getDownlink().close(); Entry entry=output.getCollect().entrySet().iterator().next(); assertEquals(123,entry.getKey().get()); 
assertEquals("value",entry.getValue().toString()); try { application.abort(new Throwable()); fail(); } catch ( IOException e) { assertEquals("pipe child exception",e.getMessage()); } } finally { if (psw != null) { for ( File file : psw) { file.deleteOnExit(); } } } }

Class: org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat

BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * test PipesNonJavaInputFormat
 */
@Test public void testFormat() throws IOException {
  PipesNonJavaInputFormat format = new PipesNonJavaInputFormat();
  JobConf conf = new JobConf();
  Reporter reporter = mock(Reporter.class);
  // A freshly created reader reports zero progress.
  RecordReader reader = format.getRecordReader(new FakeSplit(), conf, reporter);
  assertEquals(0.0f, reader.getProgress(), 0.001);
  // Materialize two input files so split generation has something to split.
  File firstInput = new File(workSpace + File.separator + "input1");
  if (!firstInput.getParentFile().exists()) {
    Assert.assertTrue(firstInput.getParentFile().mkdirs());
  }
  if (!firstInput.exists()) {
    Assert.assertTrue(firstInput.createNewFile());
  }
  File secondInput = new File(workSpace + File.separator + "input2");
  if (!secondInput.exists()) {
    Assert.assertTrue(secondInput.createNewFile());
  }
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      StringUtils.escapeString(firstInput.getAbsolutePath()) + ","
          + StringUtils.escapeString(secondInput.getAbsolutePath()));
  // Two files -> two splits.
  InputSplit[] splits = format.getSplits(conf, 2);
  assertEquals(2, splits.length);
  // The dummy reader yields no keys/values; next() records its float key
  // argument as the reported "progress".
  PipesNonJavaInputFormat.PipesDummyRecordReader dummyReader =
      new PipesNonJavaInputFormat.PipesDummyRecordReader(conf, splits[0]);
  assertNull(dummyReader.createKey());
  assertNull(dummyReader.createValue());
  assertEquals(0, dummyReader.getPos());
  assertEquals(0.0, dummyReader.getProgress(), 0.001);
  assertTrue(dummyReader.next(new FloatWritable(2.0f), NullWritable.get()));
  assertEquals(2.0, dummyReader.getProgress(), 0.001);
  dummyReader.close();
}

Class: org.apache.hadoop.mapreduce.TestClientProtocolProviderImpls

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testClusterWithLocalClientProvider() throws Exception {
  Configuration conf = new Configuration();
  // An unknown framework name must be rejected at construction time.
  try {
    conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
    new Cluster(conf);
    fail("Cluster should not be initialized with incorrect framework name");
  } catch (IOException expected) {
    // expected: no client protocol provider handles "incorrect"
  }
  // The "local" framework name resolves to the LocalJobRunner client.
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  Cluster cluster = new Cluster(conf);
  assertTrue(cluster.getClient() instanceof LocalJobRunner);
  cluster.close();
}

Class: org.apache.hadoop.mapreduce.TestLocalRunner

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test that the GC counter actually increments when we know that we've
 * spent some time in the GC during the mapper.
 */
@Test public void testGcCounter() throws Exception {
  Path in = getInputPath();
  Path out = getOutputPath();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  // Start from a clean slate on the local filesystem.
  if (fs.exists(out)) {
    fs.delete(out, true);
  }
  if (fs.exists(in)) {
    fs.delete(in, true);
  }
  createInputFile(in, 0, 20);
  // Map-only job whose mapper (GCMapper) deliberately triggers GC work.
  Job job = Job.getInstance();
  job.setMapperClass(GCMapper.class);
  job.setNumReduceTasks(0);
  job.getConfiguration().set(MRJobConfig.IO_SORT_MB, "25");
  FileInputFormat.addInputPath(job, in);
  FileOutputFormat.setOutputPath(job, out);
  assertTrue("job failed", job.waitForCompletion(true));
  // The framework-maintained GC counter must exist and be non-zero.
  Counter gcCounter = job.getCounters().findCounter(TaskCounter.GC_TIME_MILLIS);
  assertNotNull(gcCounter);
  assertTrue("No time spent in gc", gcCounter.getValue() > 0);
}

Class: org.apache.hadoop.mapreduce.TestNewCombinerGrouping

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testCombiner() throws Exception {
  // Prepare a small local input file of "<group>|<key>,<value>" lines.
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in=new File(TEST_ROOT_DIR,"input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out=new File(TEST_ROOT_DIR,"output");
  PrintWriter pw=new PrintWriter(new FileWriter(new File(in,"data.txt")));
  pw.println("A|a,1");
  pw.println("A|b,2");
  pw.println("B|a,3");
  pw.println("B|b,4");
  pw.println("B|c,5");
  pw.close();
  // Local-runner job wired with a combiner and a combiner-specific
  // key-grouping comparator.
  JobConf conf=new JobConf();
  conf.set("mapreduce.framework.name","local");
  Job job=new Job(conf);
  TextInputFormat.setInputPaths(job,new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job,new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setGroupingComparatorClass(GroupComparator.class);
  job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  // Force the combiner to run regardless of the spill count.
  job.getConfiguration().setInt("min.num.spills.for.combine",0);
  job.submit();
  job.waitForCompletion(false);
  if (job.isSuccessful()) {
    // The combiner must have consumed records and reduced the record count
    // before the reduce phase.
    Counters counters=job.getCounters();
    long combinerInputRecords=counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter","COMBINE_INPUT_RECORDS").getValue();
    long combinerOutputRecords=counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter","COMBINE_OUTPUT_RECORDS").getValue();
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    // Exactly two output lines are expected, collapsing to the set {A2, B5}.
    BufferedReader br=new BufferedReader(new FileReader(new File(out,"part-r-00000")));
    Set output=new HashSet();
    String line=br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0,1) + line.substring(4,5));
    line=br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0,1) + line.substring(4,5));
    line=br.readLine();
    Assert.assertNull(line);
    br.close();
    Set expected=new HashSet();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected,output);
  } else {
    Assert.fail("Job failed");
  }
}

Class: org.apache.hadoop.mapreduce.TestShufflePlugin

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Verifies that a custom ShuffleConsumerPlugin class can be configured,
 * resolved from the job configuration and instantiated via reflection.
 */
@Test public void testPluginAbility(){
  try {
    JobConf jobConf = new JobConf();
    // Register the test plugin as the shuffle consumer implementation.
    jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, TestShufflePlugin.TestShuffleConsumerPlugin.class, ShuffleConsumerPlugin.class);
    ShuffleConsumerPlugin shuffleConsumerPlugin = null;
    Class clazz = jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, Shuffle.class, ShuffleConsumerPlugin.class);
    assertNotNull("Unable to get " + MRConfig.SHUFFLE_CONSUMER_PLUGIN, clazz);
    shuffleConsumerPlugin = ReflectionUtils.newInstance(clazz, jobConf);
    assertNotNull("Unable to load " + MRConfig.SHUFFLE_CONSUMER_PLUGIN, shuffleConsumerPlugin);
  } catch (Exception e) {
    // IDIOM FIX: the original used assertTrue("Threw exception:" + e, false),
    // which fails without the underlying stack trace. AssertionError with a
    // cause fails the test identically while preserving the root cause.
    throw new AssertionError("Threw exception:" + e, e);
  }
}

Class: org.apache.hadoop.mapreduce.TestTaskContext

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier IgnoredMethod HybridVerifier 
/**
 * Tests context.setStatus method.
 * TODO fix testcase
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test @Ignore public void testContextStatus() throws IOException, InterruptedException, ClassNotFoundException {
  Path test=new Path(testRootTempDir,"testContextStatus");
  // Map-only job whose mapper (MyMapper) sets a custom status string.
  int numMaps=1;
  Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,0);
  job.setMapperClass(MyMapper.class);
  job.waitForCompletion(true);
  assertTrue("Job failed",job.isSuccessful());
  // The reported task state must equal the status set by the mapper.
  TaskReport[] reports=job.getTaskReports(TaskType.MAP);
  assertEquals(numMaps,reports.length);
  assertEquals(myStatus,reports[0].getState());
  // Second run with a reduce phase copying data through; single attempts so
  // any failure surfaces immediately rather than being retried.
  int numReduces=1;
  job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,numReduces);
  job.setMapperClass(DataCopyMapper.class);
  job.setReducerClass(DataCopyReducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setMaxMapAttempts(1);
  job.setMaxReduceAttempts(0);
  job.waitForCompletion(true);
  assertTrue("Job failed",job.isSuccessful());
}

Class: org.apache.hadoop.mapreduce.TestTypeConverter

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies TypeConverter.fromYarn maps an ApplicationReport onto a
 * JobStatus, both without an ApplicationResourceUsageReport (must not NPE)
 * and with one attached (resource fields must be copied through).
 */
@Test
public void testFromYarnApplicationReport() {
  ApplicationId mockAppId = mock(ApplicationId.class);
  when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
  when(mockAppId.getId()).thenReturn(6789);
  ApplicationReport mockReport = mock(ApplicationReport.class);
  when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
  when(mockReport.getApplicationId()).thenReturn(mockAppId);
  when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
  when(mockReport.getUser()).thenReturn("dummy-user");
  when(mockReport.getQueue()).thenReturn("dummy-queue");
  String jobFile = "dummy-path/job.xml";
  try {
    // No usage report set yet: the conversion must still succeed, so the
    // result is intentionally discarded (was an unused local before).
    TypeConverter.fromYarn(mockReport, jobFile);
  } catch (NullPointerException npe) {
    Assert.fail("Type conversion from YARN fails for jobs without "
        + "ApplicationUsageReport");
  }
  ApplicationResourceUsageReport appUsageRpt =
      Records.newRecord(ApplicationResourceUsageReport.class);
  Resource r = Records.newRecord(Resource.class);
  r.setMemory(2048);
  appUsageRpt.setNeededResources(r);
  appUsageRpt.setNumReservedContainers(1);
  appUsageRpt.setNumUsedContainers(3);
  appUsageRpt.setReservedResources(r);
  appUsageRpt.setUsedResources(r);
  when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
  JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
  Assert.assertNotNull("fromYarn returned null status", status);
  Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml", status.getJobFile());
  Assert.assertEquals("queue set incorrectly", "dummy-queue", status.getQueue());
  Assert.assertEquals("trackingUrl set incorrectly", "dummy-tracking-url", status.getTrackingUrl());
  Assert.assertEquals("user set incorrectly", "dummy-user", status.getUsername());
  Assert.assertEquals("schedulingInfo set incorrectly", "dummy-tracking-url", status.getSchedulingInfo());
  Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
  Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
  Assert.assertEquals("needed mem info set incorrectly", 2048, status.getNeededMem());
  Assert.assertEquals("num rsvd slots info set incorrectly", 1, status.getNumReservedSlots());
  Assert.assertEquals("num used slots info set incorrectly", 3, status.getNumUsedSlots());
  Assert.assertEquals("rsvd mem info set incorrectly", 2048, status.getReservedMem());
  Assert.assertEquals("used mem info set incorrectly", 2048, status.getUsedMem());
}

Class: org.apache.hadoop.mapreduce.filecache.TestClientDistributedCacheManager

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * determineTimestamps must stat both cache files, populate the stat cache
 * keyed by file URI, and record comma-joined modification times under
 * MRJobConfig.CACHE_FILE_TIMESTAMPS in the job configuration.
 */
@Test
public void testDetermineTimestamps() throws IOException {
  Job job = Job.getInstance(conf);
  job.addCacheFile(firstCacheFile.toUri());
  job.addCacheFile(secondCacheFile.toUri());
  Configuration jobConf = job.getConfiguration();
  // Typed map restores the generic parameters lost in the raw declaration:
  // a raw Map's get() returns Object and cannot yield FileStatus directly.
  Map<java.net.URI, FileStatus> statCache =
      new HashMap<java.net.URI, FileStatus>();
  ClientDistributedCacheManager.determineTimestamps(jobConf, statCache);
  FileStatus firstStatus = statCache.get(firstCacheFile.toUri());
  FileStatus secondStatus = statCache.get(secondCacheFile.toUri());
  Assert.assertNotNull(firstStatus);
  Assert.assertNotNull(secondStatus);
  Assert.assertEquals(2, statCache.size());
  String expected = firstStatus.getModificationTime() + ","
      + secondStatus.getModificationTime();
  Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
}

Class: org.apache.hadoop.mapreduce.jobhistory.TestEvents

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips a fixed byte stream of job-history events (produced by
// getEvents()) through EventReader and asserts the exact sequence of event
// types plus the job/task id carried by each Avro datum. The repeated
// REDUCE_ATTEMPT_STARTED/FINISHED/KILLED checks mirror the order in which
// getEvents() serialized them; each datum class (JobPriorityChange,
// TaskAttemptStarted, ...) is cast individually because the Avro records
// share no common id accessor.
@Test(timeout=10000) public void testEvents() throws Exception { EventReader reader=new EventReader(new DataInputStream(new ByteArrayInputStream(getEvents()))); HistoryEvent e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED)); assertEquals("ID",((JobPriorityChange)e.getDatum()).jobid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED)); assertEquals("ID",((JobStatusChanged)e.getDatum()).jobid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.TASK_UPDATED)); assertEquals("ID",((TaskUpdated)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED)); assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.JOB_KILLED)); assertEquals("ID",((JobUnsuccessfulCompletion)e.getDatum()).jobid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED)); assertEquals("task_1_2_r03_4",((TaskAttemptStarted)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED)); assertEquals("task_1_2_r03_4",((TaskAttemptFinished)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED)); assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED)); assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED)); assertEquals("task_1_2_r03_4",((TaskAttemptStarted)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); 
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED)); assertEquals("task_1_2_r03_4",((TaskAttemptFinished)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED)); assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString()); e=reader.getNextEvent(); assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED)); assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString()); reader.close(); }

Class: org.apache.hadoop.mapreduce.lib.input.TestCombineFileInputFormat

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * CombineFileInputFormat must work when inputs live on a non-default file
 * system: the default FS is set to a dummy URI while the input file sits on
 * the local FS, and every resulting split path must keep the "file" scheme.
 */
@Test
public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit) s;
    for (Path p : cfs.getPaths()) {
      // Expected value goes first; the original call had the arguments
      // swapped, which garbles the failure message.
      assertEquals("file", p.toUri().getScheme());
    }
  }
}

Class: org.apache.hadoop.mapreduce.lib.input.TestCombineSequenceFileInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Creates `numFiles` sequence files holding `length` keys, then asks
// CombineSequenceFileInputFormat for splits three times. Each round asserts
// exactly one CombineFileSplit is produced, reads every record through a
// CombineFileRecordReader, and uses a BitSet to prove each key appears in
// exactly one partition and that all `length` keys were seen.
// NOTE(review): numSplits is computed and logged but never passed to the
// format — getSplits(job) is driven by configuration alone.
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException { Job job=Job.getInstance(conf); Random random=new Random(); long seed=random.nextLong(); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random,job); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); InputFormat format=new CombineSequenceFileInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); assertEquals("We got more than one splits!",1,splits.size()); InputSplit split=splits.get(0); assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); RecordReader reader=format.createRecordReader(split,context); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split); reader.initialize(split,mcontext); assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass()); try { while (reader.nextKeyValue()) { IntWritable key=reader.getCurrentKey(); BytesWritable value=reader.getCurrentValue(); assertNotNull("Value should not be null.",value); final int k=key.get(); LOG.debug("read " + k); assertFalse("Key in multiple partitions.",bits.get(k)); bits.set(k); } } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

Class: org.apache.hadoop.mapreduce.lib.input.TestCombineTextInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Text-file variant of the combine-format round-trip: writes `numFiles`
// files containing `length` integer records, then over three rounds asserts
// CombineTextInputFormat yields a single CombineFileSplit, reads it back via
// CombineFileRecordReader, and a BitSet proves every value 0..length-1
// appears in exactly one partition.
// NOTE(review): numSplits is computed and logged but never handed to the
// format — split count comes from configuration alone.
@Test(timeout=10000) public void testFormat() throws Exception { Job job=Job.getInstance(new Configuration(defaultConf)); Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); CombineTextInputFormat format=new CombineTextInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / 20) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); assertEquals("We got more than one splits!",1,splits.size()); InputSplit split=splits.get(0); assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); LOG.debug("split= " + split); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(split,context); assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass()); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split); reader.initialize(split,mcontext); try { int count=0; while (reader.nextKeyValue()) { LongWritable key=reader.getCurrentKey(); assertNotNull("Key should not be null.",key); Text value=reader.getCurrentValue(); final int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("split=" + split + " count="+ count); } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Writes two small gzip-compressed text files and expects
// CombineTextInputFormat to pack both into a single split of 8 lines.
// Because the order of the two files inside the combined split is not
// fixed, the test inspects the first token and verifies whichever of the
// two valid orderings actually occurred, failing only if neither matches.
/** * Test using the gzip codec for reading */ @Test(timeout=10000) public void testGzip() throws IOException, InterruptedException { Configuration conf=new Configuration(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,conf); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,workDir); CombineTextInputFormat format=new CombineTextInputFormat(); List splits=format.getSplits(job); assertEquals("compressed splits == 1",1,splits.size()); List results=readSplit(format,splits.get(0),job); assertEquals("splits[0] length",8,results.size()); final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"}; final String[] secondList={"this is a test","of gzip"}; String first=results.get(0).toString(); if (first.equals(firstList[0])) { testResults(results,firstList,secondList); } else if (first.equals(secondList[0])) { testResults(results,secondList,firstList); } else { fail("unexpected first token!"); } }

Class: org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * listStatus over a configured-but-missing input directory must raise an
 * InvalidInputException whose message names the missing qualified path.
 */
@Test
public void testListStatusErrorOnNonExistantDir() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
  configureTestErrorOnNonExistantDir(conf, localFs);
  Job job = Job.getInstance(conf);
  FileInputFormat inputFormat = new TextInputFormat();
  try {
    inputFormat.listStatus(job);
    Assert.fail("Expecting an IOException for a missing Input path");
  } catch (IOException e) {
    // The exception must be the specific InvalidInputException subtype and
    // carry the fully-qualified missing path in its message.
    Path missingPath = localFs.makeQualified(new Path(TEST_ROOT_DIR, "input2"));
    Assert.assertTrue(e instanceof InvalidInputException);
    Assert.assertEquals(
        "Input path does not exist: " + missingPath.toString(),
        e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies split location info distinguishes replica storage: the localhost
 * replica is both on disk and in memory, the otherhost replica is disk-only.
 */
@Test
public void testSplitLocationInfo() throws Exception {
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat fileInputFormat = new TextInputFormat();
  // Typed list restores the generic parameter lost in the raw declaration;
  // a raw List's get(0) returns Object, which has no getLocations().
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  String[] locations = splits.get(0).getLocations();
  Assert.assertEquals(2, locations.length);
  SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
  Assert.assertEquals(2, locationInfo.length);
  // The order of the two hosts in the arrays is not guaranteed; pick each
  // host's info entry by matching the locations array.
  SplitLocationInfo localhostInfo =
      locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1];
  SplitLocationInfo otherhostInfo =
      locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1];
  Assert.assertTrue(localhostInfo.isOnDisk());
  Assert.assertTrue(localhostInfo.isInMemory());
  Assert.assertTrue(otherhostInfo.isOnDisk());
  Assert.assertFalse(otherhostInfo.isInMemory());
}

Class: org.apache.hadoop.mapreduce.lib.input.TestLineRecordReader

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Reads testBOM.txt through LineRecordReader and verifies that a leading
 * UTF-8 byte-order mark does not appear at the start of the first record.
 */
@Test
public void testStripBOM() throws IOException {
  final String UTF8_BOM = "\uFEFF";
  URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
  assertNotNull("Cannot find testBOM.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  long testFileSize = testFile.length();
  Configuration conf = new Configuration();
  conf.setInt(
      org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,
      Integer.MAX_VALUE);
  TaskAttemptContext context =
      new TaskAttemptContextImpl(conf, new TaskAttemptID());
  // One split covering the whole file, no preferred hosts.
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (String[]) null);
  LineRecordReader reader = new LineRecordReader();
  reader.initialize(split, context);
  int recordCount = 0;
  boolean sawFirstLine = false;
  boolean bomStripped = true;
  while (reader.nextKeyValue()) {
    if (!sawFirstLine) {
      sawFirstLine = true;
      // Only the first record can carry a BOM if the reader failed to strip it.
      bomStripped = !reader.getCurrentValue().toString().startsWith(UTF8_BOM);
    }
    ++recordCount;
  }
  reader.close();
  assertTrue("BOM is not skipped", bomStripped);
}

Class: org.apache.hadoop.mapreduce.lib.input.TestMRKeyValueTextInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises KeyValueTextInputFormat over a BZip2-compressed file (a
// splittable codec) at many file lengths: writes tab-separated pairs
// (2i -> i), confirms isSplitable() returns true, then for three rounds per
// length reads every record from every split, checking key parity
// (k % 2 == 0), key/value correspondence (k / 2 == v), and — via a BitSet —
// that each value lands in exactly one split with full coverage.
// NOTE(review): numSplits is logged but not passed to getSplits; the split
// count is governed by the max-input-split-size set above.
@Test public void testSplitableCodecs() throws Exception { final Job job=Job.getInstance(defaultConf); final Configuration conf=job.getConfiguration(); CompressionCodec codec=null; try { codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf); } catch ( ClassNotFoundException cnfe) { throw new IOException("Illegal codec!"); } Path file=new Path(workDir,"test" + codec.getDefaultExtension()); int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int MAX_LENGTH=500000; FileInputFormat.setMaxInputSplitSize(job,MAX_LENGTH / 20); for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) { LOG.info("creating; entries = " + length); Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file))); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i * 2)); writer.write("\t"); writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } KeyValueTextInputFormat format=new KeyValueTextInputFormat(); assertTrue("KVTIF claims not splittable",format.isSplitable(job,file)); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); BitSet bits=new BitSet(length); for (int j=0; j < splits.size(); j++) { LOG.debug("split[" + j + "]= "+ splits.get(j)); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(splits.get(j),context); Class clazz=reader.getClass(); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j)); reader.initialize(splits.get(j),mcontext); Text 
key=null; Text value=null; try { int count=0; while (reader.nextKeyValue()) { key=reader.getCurrentKey(); value=reader.getCurrentValue(); final int k=Integer.parseInt(key.toString()); final int v=Integer.parseInt(value.toString()); assertEquals("Bad key",0,k % 2); assertEquals("Mismatched key/value",k / 2,v); LOG.debug("read " + k + ","+ v); assertFalse(k + "," + v+ " in multiple partitions.",bits.get(v)); bits.set(v); count++; } if (count > 0) { LOG.info("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } else { LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Plain-text KeyValueTextInputFormat round-trip: writes tab-separated pairs
// (2i -> i) at growing file lengths, splits the input three times per
// length, and for every split verifies the reader class is
// KeyValueLineRecordReader, keys/values are Text, keys are even, value is
// key/2, and a BitSet proves exactly-once coverage of all values.
// NOTE(review): numSplits is logged but not passed to getSplits(job).
@Test public void testFormat() throws Exception { Job job=Job.getInstance(new Configuration(defaultConf)); Path file=new Path(workDir,"test.txt"); int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int MAX_LENGTH=10000; for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) { LOG.debug("creating; entries = " + length); Writer writer=new OutputStreamWriter(localFs.create(file)); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i * 2)); writer.write("\t"); writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } KeyValueTextInputFormat format=new KeyValueTextInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 20) + 1; LOG.debug("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.debug("splitting: got = " + splits.size()); BitSet bits=new BitSet(length); for (int j=0; j < splits.size(); j++) { LOG.debug("split[" + j + "]= "+ splits.get(j)); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(splits.get(j),context); Class clazz=reader.getClass(); assertEquals("reader class is KeyValueLineRecordReader.",KeyValueLineRecordReader.class,clazz); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j)); reader.initialize(splits.get(j),mcontext); Text key=null; Text value=null; try { int count=0; while (reader.nextKeyValue()) { key=reader.getCurrentKey(); clazz=key.getClass(); assertEquals("Key class is Text.",Text.class,clazz); value=reader.getCurrentValue(); clazz=value.getClass(); assertEquals("Value class is Text.",Text.class,clazz); final int k=Integer.parseInt(key.toString()); final int 
v=Integer.parseInt(value.toString()); assertEquals("Bad key",0,k % 2); assertEquals("Mismatched key/value",k / 2,v); LOG.debug("read " + v); assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

Class: org.apache.hadoop.mapreduce.lib.jobcontrol.TestMapReduceJobControl

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Waits for the first controlled job to reach RUNNING, checks its mapred
 * job id is assigned, then lets the whole dependency chain finish and
 * asserts no jobs failed.
 */
@Test(timeout=30000)
public void testControlledJob() throws Exception {
  LOG.info("Starting testControlledJob");
  Configuration conf = createJobConf();
  cleanupData(conf);
  Job job1 = MapReduceTestUtil.createCopyJob(conf, outdir_1, indir);
  JobControl theControl = createDependencies(conf, job1);
  // Poll until the first controlled job has actually started running.
  while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      // Restore the interrupt status before abandoning the wait loop;
      // silently swallowing the interrupt loses the signal for callers.
      Thread.currentThread().interrupt();
      break;
    }
  }
  Assert.assertNotNull(cjob1.getMapredJobId());
  waitTillAllFinished(theControl);
  assertEquals("Some jobs failed", 0, theControl.getFailedJobList().size());
  theControl.stop();
}

Class: org.apache.hadoop.mapreduce.lib.jobcontrol.TestMapReduceJobControlWithMocks

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two independent jobs, one depending on both, and one depending on that:
 * all four must finish in the SUCCESS state with an empty failed list.
 */
@Test
public void testSuccessfulJobs() throws Exception {
  JobControl jobControl = new JobControl("Test");
  ControlledJob job1 = createSuccessfulControlledJob(jobControl);
  ControlledJob job2 = createSuccessfulControlledJob(jobControl);
  ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
  runJobControl(jobControl);
  assertEquals("Success list", 4, jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list", 0, jobControl.getFailedJobList().size());
  // assertEquals reports the actual state on failure, unlike the previous
  // assertTrue(a == b) which only says "false".
  assertEquals(ControlledJob.State.SUCCESS, job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job2.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job3.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job4.getJobState());
  jobControl.stop();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A job whose submit() throws an Error (IncompatibleClassChangeError) must
 * land on the failed list in the FAILED state, not hang or succeed.
 */
@Test
public void testErrorWhileSubmitting() throws Exception {
  JobControl jobControl = new JobControl("Test");
  Job mockJob = mock(Job.class);
  ControlledJob job1 = new ControlledJob(mockJob, null);
  when(mockJob.getConfiguration()).thenReturn(new Configuration());
  // Deliberately throw an Error (not an Exception) from submit() to check
  // JobControl survives it.
  doThrow(new IncompatibleClassChangeError("This is a test")).when(mockJob).submit();
  jobControl.addJob(job1);
  runJobControl(jobControl);
  try {
    assertEquals("Success list", 0, jobControl.getSuccessfulJobList().size());
    assertEquals("Failed list", 1, jobControl.getFailedJobList().size());
    // assertEquals reports the actual state on failure, unlike
    // assertTrue(a == b).
    assertEquals(ControlledJob.State.FAILED, job1.getJobState());
  } finally {
    jobControl.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With job1 failing: job2 (independent) succeeds, while job3 and job4,
 * which transitively depend on job1, must end up DEPENDENT_FAILED.
 */
@Test
public void testFailedJob() throws Exception {
  JobControl jobControl = new JobControl("Test");
  ControlledJob job1 = createFailedControlledJob(jobControl);
  ControlledJob job2 = createSuccessfulControlledJob(jobControl);
  ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
  runJobControl(jobControl);
  assertEquals("Success list", 1, jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list", 3, jobControl.getFailedJobList().size());
  // assertEquals reports the actual state on failure, unlike the previous
  // assertTrue(a == b).
  assertEquals(ControlledJob.State.FAILED, job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job2.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED, job3.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED, job4.getJobState());
  jobControl.stop();
}

Class: org.apache.hadoop.mapreduce.security.TestBinaryTokenFile

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a distributed sleep job against the mini clusters and verifies the
 * TokenCache is usable: the job must complete with exit code 0.
 * @throws IOException
 */
@Test
public void testBinaryTokenFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  final String nnUri = dfsCluster.getURI(0).toString();
  // List the namenode twice to exercise duplicate-namenode handling.
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  final String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int exitCode = -1;
  try {
    exitCode = ToolRunner.run(conf, new MySleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, exitCode);
}

Class: org.apache.hadoop.mapreduce.security.TestJHSSecurity

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// End-to-end JHS delegation-token lifecycle against an in-process
// JobHistoryServer configured with short intervals (initial 10s, max life
// 20s, renew 10s): obtain a token as kerberos-authenticated "testrenewer",
// use it (the "Unknown job job_123456_0001" IOException is the expected
// benign response — the job doesn't exist), renew it before the initial
// interval elapses, wait past expiry and expect an "is expired" failure,
// re-obtain and use a fresh token, cancel it, cancel a token with a
// different renewer ("yarn"), and finally verify the cancelled token is
// rejected — the empty catch at the end is that expected-failure path.
// Timing is driven by wall-clock polling loops, so statement order matters.
@Test public void testDelegationToken() throws IOException, InterruptedException { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); final YarnConfiguration conf=new YarnConfiguration(new JobConf()); conf.set(JHAdminConfig.MR_HISTORY_PRINCIPAL,"RandomOrc/localhost@apache.org"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); final long initialInterval=10000l; final long maxLifetime=20000l; final long renewInterval=10000l; JobHistoryServer jobHistoryServer=null; MRClientProtocol clientUsingDT=null; long tokenFetchTime; try { jobHistoryServer=new JobHistoryServer(){ protected void doSecureLogin( Configuration conf) throws IOException { } @Override protected JHSDelegationTokenSecretManager createJHSSecretManager( Configuration conf, HistoryServerStateStoreService store){ return new JHSDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval,3600000,store); } @Override protected HistoryClientService createHistoryClientService(){ return new HistoryClientService(historyContext,this.jhsDTSecretManager){ @Override protected void initializeWebApp( Configuration conf){ } } ; } } ; jobHistoryServer.init(conf); jobHistoryServer.start(); final MRClientProtocol hsService=jobHistoryServer.getClientService().getClientHandler(); UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG"); Assert.assertEquals("testrenewer",loggedInUser.getShortUserName()); loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS); Token token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"TheDarkLord",conf); GetJobReportRequest jobReportRequest=Records.newRecord(GetJobReportRequest.class); 
jobReportRequest.setJobId(MRBuilderUtils.newJobId(123456,1,1)); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { Assert.assertEquals("Unknown job job_123456_0001",e.getMessage()); } while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) { Thread.sleep(500l); } long nextExpTime=renewDelegationToken(loggedInUser,hsService,token); long renewalTime=System.currentTimeMillis(); LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime); while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) { Thread.sleep(500l); } Thread.sleep(50l); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { Assert.assertEquals("Unknown job job_123456_0001",e.getMessage()); } while (System.currentTimeMillis() < renewalTime + renewInterval) { Thread.sleep(500l); } Thread.sleep(50l); LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid"); try { clientUsingDT.getJobReport(jobReportRequest); fail("Should not have succeeded with an expired token"); } catch ( IOException e) { assertTrue(e.getCause().getMessage().contains("is expired")); } if (clientUsingDT != null) { clientUsingDT=null; } token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { fail("Unexpected exception" + e); } cancelDelegationToken(loggedInUser,hsService,token); Token tokenWithDifferentRenewer=getDelegationToken(loggedInUser,hsService,"yarn"); cancelDelegationToken(loggedInUser,hsService,tokenWithDifferentRenewer); if (clientUsingDT != null) { clientUsingDT=null; } 
clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf); LOG.info("Cancelled delegation token at: " + System.currentTimeMillis()); try { clientUsingDT.getJobReport(jobReportRequest); fail("Should not have succeeded with a cancelled delegation token"); } catch ( IOException e) { } } finally { jobHistoryServer.stop(); } }

Class: org.apache.hadoop.mapreduce.security.TestMRCredentials

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a distributed credentials test job and verifies the TokenCache is
 * available: the job must exit with status 0.
 * @throws IOException
 */
@Test
public void test() throws IOException {
  Configuration jobConf = new JobConf(mrCluster.getConfig());
  NameNode nn = dfsCluster.getNameNode();
  URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
  // List the namenode twice to exercise duplicate-namenode handling.
  jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());
  jobConf.set("mapreduce.job.credentials.json", "keys.json");
  String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int res = -1;
  try {
    res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with" + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  // JUnit's assertEquals takes (message, expected, actual): the original
  // had the last two arguments swapped, producing a misleading diff on
  // failure.
  assertEquals("dist job res is not 0", 0, res);
}

Class: org.apache.hadoop.mapreduce.security.TestTokenCache

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * Verifies that tokens loaded from a binary credentials file are merged
 * into job credentials without clobbering a newer token already held for
 * the same service, and that re-obtaining tokens is idempotent.
 */
@Test
@SuppressWarnings("deprecation")
public void testBinaryCredentials() throws Exception {
  Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "test/build/data"));
  String binaryTokenFile = FileSystem.getLocal(conf)
      .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath();
  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  MockFileSystem fs2 = createFileSystemForServiceName("service2");
  MockFileSystem fs3 = createFileSystemForServiceName("service3");
  // Write tokens for service1 and service2 into the binary token file.
  Credentials creds = new Credentials();
  Token token1 = fs1.getDelegationToken(renewer);
  Token token2 = fs2.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  creds.addToken(token2.getService(), token2);
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  // A fresh token for service1 must win over the file's stale token.
  creds = new Credentials();
  Token newerToken1 = fs1.getDelegationToken(renewer);
  assertNotSame(newerToken1, token1);
  creds.addToken(newerToken1.getService(), newerToken1);
  checkToken(creds, newerToken1);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  checkToken(creds, newerToken1, token2);
  TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
  checkToken(creds, newerToken1, token2);
  // service3 has no token in the file, so a new one must be fetched.
  TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
  Token token3 = creds.getToken(new Text(fs3.getCanonicalServiceName()));
  // assertNotNull states the intent directly; the original used the
  // assertTrue(x != null) anti-pattern.
  assertNotNull(token3);
  checkToken(creds, newerToken1, token2, token3);
  // Re-obtaining tokens for all three file systems must be idempotent.
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
  TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
  checkToken(creds, newerToken1, token2, token3);
}

Class: org.apache.hadoop.mapreduce.security.token.delegation.TestDelegationToken

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings("deprecation") @Test public void testDelegationToken() throws Exception { final JobClient client; client=user1.doAs(new PrivilegedExceptionAction(){ @Override public JobClient run() throws Exception { return new JobClient(cluster.createJobConf()); } } ); final JobClient bobClient; bobClient=user2.doAs(new PrivilegedExceptionAction(){ @Override public JobClient run() throws Exception { return new JobClient(cluster.createJobConf()); } } ); final Token token=client.getDelegationToken(new Text(user1.getUserName())); DataInputBuffer inBuf=new DataInputBuffer(); byte[] bytes=token.getIdentifier(); inBuf.reset(bytes,bytes.length); DelegationTokenIdentifier ident=new DelegationTokenIdentifier(); ident.readFields(inBuf); assertEquals("alice",ident.getUser().getUserName()); long createTime=ident.getIssueDate(); long maxTime=ident.getMaxDate(); long currentTime=System.currentTimeMillis(); System.out.println("create time: " + createTime); System.out.println("current time: " + currentTime); System.out.println("max time: " + maxTime); assertTrue("createTime < current",createTime < currentTime); assertTrue("current < maxTime",currentTime < maxTime); user1.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { client.renewDelegationToken(token); client.renewDelegationToken(token); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { bobClient.renewDelegationToken(token); Assert.fail("bob renew"); } catch ( AccessControlException ace) { } return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { bobClient.cancelDelegationToken(token); Assert.fail("bob cancel"); } catch ( AccessControlException ace) { } return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { client.cancelDelegationToken(token); try { client.cancelDelegationToken(token); Assert.fail("second 
alice cancel"); } catch ( InvalidToken it) { } return null; } } ); }

Class: org.apache.hadoop.mapreduce.task.reduce.TestMergeManager

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Drives StubbedMergeManager through two in-memory merge cycles: two outputs
 * fit in memory, a third reservation must stall, committing triggers a merge,
 * and exactly two merges occur with no reported exceptions.
 */
@Test(timeout = 10000)
public void testMemoryMerge() throws Exception {
  final int TOTAL_MEM_BYTES = 10000;
  final int OUTPUT_SIZE = 7950;
  JobConf conf = new JobConf();
  conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 1.0f);
  conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, TOTAL_MEM_BYTES);
  conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.8f);
  conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 0.9f);
  TestExceptionReporter reporter = new TestExceptionReporter();
  // Barriers let the test rendezvous with the stubbed merge thread.
  CyclicBarrier mergeStart = new CyclicBarrier(2);
  CyclicBarrier mergeComplete = new CyclicBarrier(2);
  StubbedMergeManager mgr = new StubbedMergeManager(conf, reporter, mergeStart, mergeComplete);
  // Two outputs fit in memory; reserving them must yield in-memory outputs.
  MapOutput out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge", (out1 instanceof InMemoryMapOutput));
  InMemoryMapOutput mout1 = (InMemoryMapOutput) out1;
  fillOutput(mout1);
  MapOutput out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge", (out2 instanceof InMemoryMapOutput));
  InMemoryMapOutput mout2 = (InMemoryMapOutput) out2;
  fillOutput(mout2);
  // A third reservation exceeds the memory limit and must be stalled (null).
  MapOutput out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  // FIX: assertNull instead of assertEquals(msg, null, x) — same check,
  // clearer intent and failure message.
  Assert.assertNull("Should be told to wait", out3);
  // Committing both outputs should trigger the first in-memory merge.
  mout1.commit();
  mout2.commit();
  mergeStart.await();
  Assert.assertEquals(1, mgr.getNumMerges());
  // Repeat the cycle while the first merge is still outstanding.
  out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge", (out1 instanceof InMemoryMapOutput));
  mout1 = (InMemoryMapOutput) out1;
  fillOutput(mout1);
  out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge", (out2 instanceof InMemoryMapOutput));
  mout2 = (InMemoryMapOutput) out2;
  fillOutput(mout2);
  out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertNull("Should be told to wait", out3);
  mout1.commit();
  mout2.commit();
  mergeComplete.await();
  mergeStart.await();
  Assert.assertEquals(2, mgr.getNumMerges());
  mergeComplete.await();
  // Exactly two merges total, and the exception reporter must never fire.
  Assert.assertEquals(2, mgr.getNumMerges());
  Assert.assertEquals("exception reporter invoked", 0, reporter.getNumExceptions());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException { JobConf jobConf=new JobConf(); final int SORT_FACTOR=5; jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR); MapOutputFile mapOutputFile=new MROutputFiles(); FileSystem fs=FileSystem.getLocal(jobConf); MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile); MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger"); int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor"); assertEquals(mergeFactor,SORT_FACTOR); onDiskMerger.suspend(); Random rand=new Random(); for (int i=0; i < 2 * SORT_FACTOR; ++i) { Path path=new Path("somePath"); CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt()); manager.closeOnDiskFile(cap); } LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged"); assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0); for (int i=0; i < pendingToBeMerged.size(); ++i) { List inputs=pendingToBeMerged.get(i); for (int j=1; j < inputs.size(); ++j) { assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR); assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize()); } } }

Class: org.apache.hadoop.mapreduce.task.reduce.TestShuffleScheduler

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies ShuffleSchedulerImpl.tipFailed(): with 2 map tasks configured,
// failing one tip moves copy progress to 0.5 (waitUntilDone still false),
// and failing the second moves it to 1.0 and lets waitUntilDone return true.
// NOTE(review): ShuffleSchedulerImpl is deliberately used as a raw type here
// (hence @SuppressWarnings("rawtypes")); the anonymous TaskStatus only stubs
// the two members the scheduler touches.
@SuppressWarnings("rawtypes") @Test public void testTipFailed() throws Exception { JobConf job=new JobConf(); job.setNumMapTasks(2); TaskStatus status=new TaskStatus(){ @Override public boolean getIsMap(){ return false; } @Override public void addFetchFailedMap( TaskAttemptID mapTaskId){ } } ; Progress progress=new Progress(); TaskAttemptID reduceId=new TaskAttemptID("314159",0,TaskType.REDUCE,0,0); ShuffleSchedulerImpl scheduler=new ShuffleSchedulerImpl(job,status,reduceId,null,progress,null,null,null); JobID jobId=new JobID(); TaskID taskId1=new TaskID(jobId,TaskType.REDUCE,1); scheduler.tipFailed(taskId1); Assert.assertEquals("Progress should be 0.5",0.5f,progress.getProgress(),0.0f); Assert.assertFalse(scheduler.waitUntilDone(1)); TaskID taskId0=new TaskID(jobId,TaskType.REDUCE,0); scheduler.tipFailed(taskId0); Assert.assertEquals("Progress should be 1.0",1.0f,progress.getProgress(),0.0f); Assert.assertTrue(scheduler.waitUntilDone(1)); }

Class: org.apache.hadoop.mapreduce.util.TestMRAsyncDiskService

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Exercises MRAsyncDiskService deletion across two volumes:
//  - moveAndDeleteRelativePath removes a dir, and removing a parent ("b")
//    also removes its child ("b/c");
//  - deleting a nonexistent relative path returns false;
//  - moveAndDeleteAbsolutePath must throw IOException for a path outside
//    every configured volume, and succeed for one inside a volume;
//  - makeSureCleanedUp asserts the toBeDeleted areas are drained at the end.
/** * This test creates some directories and then removes them through * MRAsyncDiskService. */ @Test public void testMRAsyncDiskService() throws Throwable { FileSystem localFileSystem=FileSystem.getLocal(new Configuration()); String[] vols=new String[]{TEST_ROOT_DIR + "/0",TEST_ROOT_DIR + "/1"}; MRAsyncDiskService service=new MRAsyncDiskService(localFileSystem,vols); String a="a"; String b="b"; String c="b/c"; String d="d"; File fa=new File(vols[0],a); File fb=new File(vols[1],b); File fc=new File(vols[1],c); File fd=new File(vols[1],d); fa.mkdirs(); fb.mkdirs(); fc.mkdirs(); fd.mkdirs(); assertTrue(fa.exists()); assertTrue(fb.exists()); assertTrue(fc.exists()); assertTrue(fd.exists()); service.moveAndDeleteRelativePath(vols[0],a); assertFalse(fa.exists()); service.moveAndDeleteRelativePath(vols[1],b); assertFalse(fb.exists()); assertFalse(fc.exists()); assertFalse(service.moveAndDeleteRelativePath(vols[1],"not_exists")); IOException ee=null; try { service.moveAndDeleteAbsolutePath(TEST_ROOT_DIR + "/2"); } catch ( IOException e) { ee=e; } assertNotNull("asyncDiskService should not be able to delete files " + "outside all volumes",ee); assertTrue(service.moveAndDeleteAbsolutePath(vols[1] + Path.SEPARATOR_CHAR + d)); makeSureCleanedUp(vols,service); }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Constructing MRAsyncDiskService must succeed even when one of the
 * configured volumes is not writable.
 */
@Test
public void testToleratesSomeUnwritableVolumes() throws Throwable {
  final FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
  final String unwritableVol = TEST_ROOT_DIR + "/0";
  final String writableVol = TEST_ROOT_DIR + "/1";
  final String[] vols = new String[]{unwritableVol, writableVol};
  assertTrue(new File(unwritableVol).mkdirs());
  // Make the first volume read-only (mode 400).
  assertEquals(0, FileUtil.chmod(unwritableVol, "400"));
  try {
    new MRAsyncDiskService(localFileSystem, vols);
  } finally {
    // Restore permissions so later tests / cleanup can remove the directory.
    FileUtil.chmod(unwritableVol, "755");
  }
}

Class: org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Submits a SleepJob with memory requests (700MB map / 1500MB reduce) that
// are not multiples of the scheduler's allocation increment, proving the MR
// AM no longer normalizes container capabilities itself. Skips silently when
// the MRAppJar has not been built.
// NOTE(review): the map size uses the current key "mapreduce.map.memory.mb"
// while the reduce size uses the deprecated key "mapred.reduce.memory.mb" —
// presumably deliberate to cover deprecated-key handling; confirm upstream.
/** * To ensure nothing broken after we removed normalization * from the MRAM side * @throws Exception */ @Test public void testJobWithNonNormalizedCapabilities() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } JobConf jobConf=new JobConf(mrCluster.getConfig()); jobConf.setInt("mapreduce.map.memory.mb",700); jobConf.setInt("mapred.reduce.memory.mb",1500); SleepJob sleepJob=new SleepJob(); sleepJob.setConf(jobConf); Job job=sleepJob.createJob(3,2,1000,1,500,1); job.setJarByClass(SleepJob.class); job.addFileToClassPath(APP_JAR); job.submit(); boolean completed=job.waitForCompletion(true); Assert.assertTrue("Job should be completed",completed); Assert.assertEquals("Job should be finished successfully",JobStatus.State.SUCCEEDED,job.getJobState()); }

Class: org.apache.hadoop.mapreduce.v2.TestMRJobs

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// End-to-end SleepJob run on the mini YARN cluster: 3 maps and N reduces
// (N from "TestMRJobs.testSleepJob.reduces", default 2), local master
// address. Asserts job success, the SUCCEEDED state, that the tracking URL
// ends with the job-id suffix, then checks counters and task progress via
// the helper verifiers. Skips silently when the MRAppJar has not been built.
@Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting testSleepJob()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Configuration sleepConf=new Configuration(mrCluster.getConfig()); sleepConf.set(MRConfig.MASTER_ADDRESS,"local"); SleepJob sleepJob=new SleepJob(); sleepJob.setConf(sleepConf); int numReduces=sleepConf.getInt("TestMRJobs.testSleepJob.reduces",2); Job job=sleepJob.createJob(3,numReduces,10000,1,5000,1); job.addFileToClassPath(APP_JAR); job.setJarByClass(SleepJob.class); job.setMaxMapAttempts(1); job.submit(); String trackingUrl=job.getTrackingURL(); String jobId=job.getJobID().toString(); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/")); verifySleepJobCounters(job); verifyTaskProgress(job); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Runs RandomTextWriterJob with 3072 total bytes at 1024 bytes per map
// (=> 3 map tasks) and verifies: job success, SUCCEEDED state, tracking-URL
// format, that exactly 3 output part files exist (the _SUCCESS marker is
// excluded from the count), and the writer's counters. Skips silently when
// the MRAppJar has not been built.
@Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting testRandomWriter()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } RandomTextWriterJob randomWriterJob=new RandomTextWriterJob(); mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES,"3072"); mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP,"1024"); Job job=randomWriterJob.createJob(mrCluster.getConfig()); Path outputDir=new Path(OUTPUT_ROOT_DIR,"random-output"); FileOutputFormat.setOutputPath(job,outputDir); job.setSpeculativeExecution(false); job.addFileToClassPath(APP_JAR); job.setJarByClass(RandomTextWriterJob.class); job.setMaxMapAttempts(1); job.submit(); String trackingUrl=job.getTrackingURL(); String jobId=job.getJobID().toString(); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/")); RemoteIterator iterator=FileContext.getFileContext(mrCluster.getConfig()).listStatus(outputDir); int count=0; while (iterator.hasNext()) { FileStatus file=iterator.next(); if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) { count++; } } Assert.assertEquals("Number of part files is wrong!",3,count); verifyRandomWriterCounters(job); }

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a SleepJob with tiny log limits and log-rotation backups configured,
 * then inspects each node manager's log directories: the AM container must
 * have MR_AM_LOG_BACKUPS+1 syslog* files, each map container
 * TASK_LOG_BACKUPS+1, with the rolled syslog.1 at or above the configured
 * size limit. Skips silently when the MRAppJar has not been built.
 */
@Test(timeout = 120000)
public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final SleepJob sleepJob = new SleepJob();
  final JobConf sleepConf = new JobConf(mrCluster.getConfig());
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  final long userLogKb = 4;
  sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
  sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  final long amLogKb = 7;
  sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
  sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
  sleepJob.setConf(sleepConf);
  final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  final JobId jobId = TypeConverter.toYarn(job.getJobID());
  final ApplicationId appID = jobId.getAppId();
  // Poll until the application reaches a terminal RM state (max 60s).
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED,
      mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
  // Build a glob that matches every container's syslog under this app.
  final String appIdStr = appID.toString();
  final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
  final String containerGlob = "container_" + appIdSuffix + "_*_*";
  final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob
      + Path.SEPARATOR + TaskLog.LogName.SYSLOG;
  int numAppMasters = 0;
  int numMapTasks = 0;
  for (int i = 0; i < NUM_NODE_MGRS; i++) {
    final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
    for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
      final Path absSyslogGlob = new Path(logDir + Path.SEPARATOR + syslogGlob);
      LOG.info("Checking for glob: " + absSyslogGlob);
      final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
      for (FileStatus slog : syslogs) {
        // Container #1 is the AM (unless the job ran uberized).
        boolean foundAppMaster = job.isUber();
        final Path containerPathComponent = slog.getPath().getParent();
        if (!foundAppMaster) {
          final ContainerId cid = ConverterUtils.toContainerId(containerPathComponent.getName());
          foundAppMaster = (cid.getId() == 1);
        }
        final FileStatus[] sysSiblings = localFs.globStatus(
            new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
        Arrays.sort(sysSiblings);
        if (foundAppMaster) {
          numAppMasters++;
        } else {
          numMapTasks++;
        }
        if (foundAppMaster) {
          // FIX: assertSame compares references; the autoboxed Integers here
          // only matched by accident via the JVM's small-integer cache.
          // assertEquals is the correct value comparison for counts.
          Assert.assertEquals("Unexpected number of AM sylog* files",
              sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1,
              sysSiblings.length);
          Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
              sysSiblings[1].getLen() >= amLogKb * 1024);
        } else {
          Assert.assertEquals("Unexpected number of MR task sylog* files",
              sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1,
              sysSiblings.length);
          Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
              sysSiblings[1].getLen() >= userLogKb * 1024);
        }
      }
    }
  }
  // Exactly one AM log; map logs only when the job was not uberized.
  Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
  if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
    Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
  } else {
    Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
  }
}

Class: org.apache.hadoop.mapreduce.v2.TestRMNMInfo

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Queries the RMNMInfo bean of the live mini-cluster and parses its live-node
// JSON: one entry per node manager, each node RUNNING, with every expected
// report field present and zero used containers / used memory. Skips silently
// when the MRAppJar has not been built.
@Test public void testRMNMInfo() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } RMContext rmc=mrCluster.getResourceManager().getRMContext(); ResourceScheduler rms=mrCluster.getResourceManager().getResourceScheduler(); RMNMInfo rmInfo=new RMNMInfo(rmc,rms); String liveNMs=rmInfo.getLiveNodeManagers(); ObjectMapper mapper=new ObjectMapper(); JsonNode jn=mapper.readTree(liveNMs); Assert.assertEquals("Unexpected number of live nodes:",NUMNODEMANAGERS,jn.size()); Iterator it=jn.iterator(); while (it.hasNext()) { JsonNode n=it.next(); Assert.assertNotNull(n.get("HostName")); Assert.assertNotNull(n.get("Rack")); Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING")); Assert.assertNotNull(n.get("NodeHTTPAddress")); Assert.assertNotNull(n.get("LastHealthUpdate")); Assert.assertNotNull(n.get("HealthReport")); Assert.assertNotNull(n.get("NodeManagerVersion")); Assert.assertNotNull(n.get("NumContainers")); Assert.assertEquals(n.get("NodeId") + ": Unexpected number of used containers",0,n.get("NumContainers").asInt()); Assert.assertEquals(n.get("NodeId") + ": Unexpected amount of used memory",0,n.get("UsedMemoryMB").asInt()); Assert.assertNotNull(n.get("AvailableMemoryMB")); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Feeds RMNMInfo a mocked RMContext whose node is unknown to the (also
// mocked) scheduler: the scheduler-derived fields (NumContainers,
// UsedMemoryMB, AvailableMemoryMB) must then be absent (null) from the JSON
// report while the RM-side fields are still populated.
// NOTE(review): "Missmatch" in the method name is a typo, but renaming a
// public test method would change the block's external interface.
@Test public void testRMNMInfoMissmatch() throws Exception { RMContext rmc=mock(RMContext.class); ResourceScheduler rms=mock(ResourceScheduler.class); ConcurrentMap map=new ConcurrentHashMap(); RMNode node=MockNodes.newNodeInfo(1,MockNodes.newResource(4 * 1024)); map.put(node.getNodeID(),node); when(rmc.getRMNodes()).thenReturn(map); RMNMInfo rmInfo=new RMNMInfo(rmc,rms); String liveNMs=rmInfo.getLiveNodeManagers(); ObjectMapper mapper=new ObjectMapper(); JsonNode jn=mapper.readTree(liveNMs); Assert.assertEquals("Unexpected number of live nodes:",1,jn.size()); Iterator it=jn.iterator(); while (it.hasNext()) { JsonNode n=it.next(); Assert.assertNotNull(n.get("HostName")); Assert.assertNotNull(n.get("Rack")); Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING")); Assert.assertNotNull(n.get("NodeHTTPAddress")); Assert.assertNotNull(n.get("LastHealthUpdate")); Assert.assertNotNull(n.get("HealthReport")); Assert.assertNotNull(n.get("NodeManagerVersion")); Assert.assertNull(n.get("NumContainers")); Assert.assertNull(n.get("UsedMemoryMB")); Assert.assertNull(n.get("AvailableMemoryMB")); } }

Class: org.apache.hadoop.mapreduce.v2.TestRecordFactory

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the protobuf record factory instantiates the PB-backed
 * implementation class for each requested record interface.
 */
@Test
public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    CounterGroup response = pbRecordFactory.newRecordInstance(CounterGroup.class);
    Assert.assertEquals(CounterGroupPBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    // FIX: corrected the "crete" typo in the failure message.
    Assert.fail("Failed to create record");
  }
  try {
    GetCountersRequest response = pbRecordFactory.newRecordInstance(GetCountersRequest.class);
    Assert.assertEquals(GetCountersRequestPBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
}

Class: org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Runs the speculation job three ways and checks launch counters:
//  1. no speculation: exactly 2 maps and 2 reduces launched, none failed;
//  2. map speculation on: one extra map launched and subsequently killed;
//  3. reduce speculation on: one extra reduce launched.
// Skips silently when the MRAppJar has not been built.
@Test public void testSpeculativeExecution() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Job job=runSpecTest(false,false); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Counters counters=job.getCounters(); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue()); job=runSpecTest(true,false); succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); counters=job.getCounters(); Assert.assertEquals(3,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue()); Assert.assertEquals(1,counters.findCounter(JobCounter.NUM_KILLED_MAPS).getValue()); job=runSpecTest(false,true); succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); counters=job.getCounters(); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(3,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); }

Class: org.apache.hadoop.mapreduce.v2.TestUberAM

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Uberized variant of testFailingMapper: in uber mode the failing map must
 * NOT be retried, so only attempt #0 exists, exactly one (TIP)FAILED task
 * completion event is emitted, and the job ends FAILED.
 */
@Override
@Test
public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting uberized testFailingMapper().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Job job = runFailingMapperJob();
  // Dump diagnostics for the first (and only) attempt.
  TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
  TaskAttemptID aId = new TaskAttemptID(taskID, 0);
  System.out.println("Diagnostics for " + aId + " :");
  for (String diag : job.getTaskDiagnostics(aId)) {
    System.out.println(diag);
  }
  // Attempt #1 must not exist; querying it is expected to throw.
  boolean secondTaskAttemptExists = true;
  try {
    aId = new TaskAttemptID(taskID, 1);
    System.out.println("Diagnostics for " + aId + " :");
    for (String diag : job.getTaskDiagnostics(aId)) {
      System.out.println(diag);
    }
  } catch (Exception e) {
    // expected: no second attempt in uber mode
    secondTaskAttemptExists = false;
  }
  // FIX: assertFalse is the idiomatic form of assertEquals(false, x).
  Assert.assertFalse(secondTaskAttemptExists);
  TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
  Assert.assertEquals(1, events.length);
  // Accept either FAILED or TIPFAILED depending on how the failure surfaced.
  TaskCompletionEvent.Status status = events[0].getStatus();
  Assert.assertTrue(status == TaskCompletionEvent.Status.FAILED
      || status == TaskCompletionEvent.Status.TIPFAILED);
  Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
}

Class: org.apache.hadoop.mapreduce.v2.api.records.TestIds

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contract of TaskAttemptId:
 * ids equal only when every component matches, ordering is consistent across
 * each differing component, and toString zero-pads small numeric parts.
 */
@Test
public void testTaskAttemptId() {
  // FIX: upper-case long suffixes (L); a lowercase 'l' is easily misread as 1.
  long ts1 = 1315890136000L;
  long ts2 = 1315890136001L;
  // t1 and t6 are identical; each of t2..t5 differs in exactly one component.
  TaskAttemptId t1 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
  TaskAttemptId t2 = createTaskAttemptId(ts1, 2, 2, TaskType.REDUCE, 2);
  TaskAttemptId t3 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 3);
  TaskAttemptId t4 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 1);
  TaskAttemptId t5 = createTaskAttemptId(ts1, 2, 1, TaskType.MAP, 3);
  TaskAttemptId t6 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
  // equals contract.
  assertTrue(t1.equals(t6));
  assertFalse(t1.equals(t2));
  assertFalse(t1.equals(t3));
  assertFalse(t1.equals(t5));
  // compareTo: zero for equal ids, consistent sign for each differing one.
  assertTrue(t1.compareTo(t6) == 0);
  assertTrue(t1.compareTo(t2) < 0);
  assertTrue(t1.compareTo(t3) < 0);
  assertTrue(t1.compareTo(t4) > 0);
  assertTrue(t1.compareTo(t5) > 0);
  // hashCode consistent with equals.
  assertTrue(t1.hashCode() == t6.hashCode());
  assertFalse(t1.hashCode() == t2.hashCode());
  assertFalse(t1.hashCode() == t3.hashCode());
  assertFalse(t1.hashCode() == t5.hashCode());
  // toString: small ids are zero-padded, large ids printed verbatim.
  TaskAttemptId t7 = createTaskAttemptId(ts2, 5463346, 4326575, TaskType.REDUCE, 54375);
  assertEquals("attempt_" + ts1 + "_0002_m_000002_2", t1.toString());
  assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375", t7.toString());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contract of TaskId: only the
 * fully identical id compares equal, ordering is consistent for each
 * differing component, and toString zero-pads small numeric parts.
 */
@Test
public void testTaskId() {
  // FIX: upper-case long suffixes (L); a lowercase 'l' is easily misread as 1.
  long ts1 = 1315890136000L;
  long ts2 = 1315890136001L;
  TaskId t1 = createTaskId(ts1, 1, 2, TaskType.MAP);
  TaskId t2 = createTaskId(ts1, 1, 2, TaskType.REDUCE);
  TaskId t3 = createTaskId(ts1, 1, 1, TaskType.MAP);
  TaskId t4 = createTaskId(ts1, 1, 2, TaskType.MAP);
  TaskId t5 = createTaskId(ts2, 1, 1, TaskType.MAP);
  // equals contract: only t4 (identical components) is equal to t1.
  assertTrue(t1.equals(t4));
  assertFalse(t1.equals(t2));
  assertFalse(t1.equals(t3));
  assertFalse(t1.equals(t5));
  // compareTo: zero for equal ids, consistent sign for each differing one.
  assertTrue(t1.compareTo(t4) == 0);
  assertTrue(t1.compareTo(t2) < 0);
  assertTrue(t1.compareTo(t3) > 0);
  assertTrue(t1.compareTo(t5) < 0);
  // hashCode consistent with equals.
  assertTrue(t1.hashCode() == t4.hashCode());
  assertFalse(t1.hashCode() == t2.hashCode());
  assertFalse(t1.hashCode() == t3.hashCode());
  assertFalse(t1.hashCode() == t5.hashCode());
  // toString: small ids are zero-padded, large ids printed verbatim.
  TaskId t6 = createTaskId(ts1, 324151, 54643747, TaskType.REDUCE);
  assertEquals("task_" + ts1 + "_0001_m_000002", t1.toString());
  assertEquals("task_" + ts1 + "_324151_r_54643747", t6.toString());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contract of JobId, including
 * zero-padding of small job numbers in toString.
 */
@Test
public void testJobId() {
  // FIX: upper-case long suffixes (L); a lowercase 'l' is easily misread as 1.
  long ts1 = 1315890136000L;
  long ts2 = 1315890136001L;
  JobId j1 = createJobId(ts1, 2);
  JobId j2 = createJobId(ts1, 1);
  JobId j3 = createJobId(ts2, 1);
  JobId j4 = createJobId(ts1, 2);
  // equals contract.
  assertTrue(j1.equals(j4));
  assertFalse(j1.equals(j2));
  assertFalse(j1.equals(j3));
  // compareTo consistent with equals.
  assertTrue(j1.compareTo(j4) == 0);
  assertTrue(j1.compareTo(j2) > 0);
  assertTrue(j1.compareTo(j3) < 0);
  // hashCode consistent with equals.
  assertTrue(j1.hashCode() == j4.hashCode());
  assertFalse(j1.hashCode() == j2.hashCode());
  assertFalse(j1.hashCode() == j3.hashCode());
  // toString: small job numbers are zero-padded to 4 digits.
  JobId j5 = createJobId(ts1, 231415);
  assertEquals("job_" + ts1 + "_0002", j1.toString());
  assertEquals("job_" + ts1 + "_231415", j5.toString());
}

Class: org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testNotificationOnLastRetryNormalShutdown() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,true,this.getClass().getName(),true,2,true)); doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForInternalState(job,JobStateInternal.SUCCEEDED); app.shutDownJob(); Assert.assertTrue(app.isLastAMRetry()); Assert.assertEquals(1,JobEndServlet.calledTimes); Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",JobEndServlet.requestUri.getQuery()); Assert.assertEquals(JobState.SUCCEEDED.toString(),JobEndServlet.foundJobState); server.stop(); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Drives notify() directly against an unreachable URL and checks the retry
// budget: first 1 attempt with a 5s wait (>5s elapsed), then 3 attempts with
// 3s waits (>9s elapsed). Timing-based, so only generous lower bounds on
// elapsed time are asserted.
@Test public void testNotifyRetries() throws InterruptedException { JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"0"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"1"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,"http://nonexistent"); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"5000"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"5000"); JobReport jobReport=mock(JobReport.class); long startTime=System.currentTimeMillis(); this.notificationCount=0; this.setConf(conf); this.notify(jobReport); long endTime=System.currentTimeMillis(); Assert.assertEquals("Only 1 try was expected but was : " + this.notificationCount,1,this.notificationCount); Assert.assertTrue("Should have taken more than 5 seconds it took " + (endTime - startTime),endTime - startTime > 5000); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"3"); conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"3"); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"3000"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"3000"); startTime=System.currentTimeMillis(); this.notificationCount=0; this.setConf(conf); this.notify(jobReport); endTime=System.currentTimeMillis(); Assert.assertEquals("Only 3 retries were expected but was : " + this.notificationCount,3,this.notificationCount); Assert.assertTrue("Should have taken more than 9 seconds it took " + (endTime - startTime),endTime - startTime > 9000); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// When unregistration fails on what was flagged as the last AM retry, a
// REBOOT event must clear isLastAMRetry and suppress the job-end
// notification entirely: the servlet is never called and records no state.
@Test public void testNotificationOnLastRetryUnregistrationFailure() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,2,false)); app.isLastAMRetry=true; doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForState(job,JobState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT)); app.waitForInternalState(job,JobStateInternal.REBOOT); app.waitForServiceToStop(10000); Assert.assertFalse(app.isLastAMRetry()); Assert.assertEquals(0,JobEndServlet.calledTimes); Assert.assertNull(JobEndServlet.requestUri); Assert.assertNull(JobEndServlet.foundJobState); server.stop(); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// A REBOOT event on a NON-last AM retry must not trigger the job-end
// notification: the servlet is never called and isLastAMRetry() stays false.
// NOTE(review): waiting for JobState.RUNNING again after shutDownJob() looks
// odd — presumably the externally visible state stays RUNNING because
// unregistration failed on a non-last retry; confirm against MRApp semantics.
@Test public void testAbsentNotificationOnNotLastRetryUnregistrationFailure() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,1,false)); doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForState(job,JobState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT)); app.waitForInternalState(job,JobStateInternal.REBOOT); app.shutDownJob(); app.waitForState(job,JobState.RUNNING); Assert.assertFalse(app.isLastAMRetry()); Assert.assertEquals(0,JobEndServlet.calledTimes); Assert.assertNull(JobEndServlet.requestUri); Assert.assertNull(JobEndServlet.foundJobState); server.stop(); }

Class: org.apache.hadoop.mapreduce.v2.app.TestMRAppMaster

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// With both the start-commit and end-commit-FAILURE marker files present in
// the staging dir, MRAppMaster.initAndStartAppMaster must throw IOException,
// force the recovered job state to FAILED, and report that failure status.
@Test public void testMRAppMasterFailLock() throws IOException, InterruptedException { String applicationAttemptIdStr="appattempt_1317529182569_0004_000002"; String containerIdStr="container_1317529182569_0004_000002_1"; String userName="TestAppMasterUser"; JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr); JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId())); Path start=MRApps.getStartJobCommitFile(conf,userName,jobId); Path end=MRApps.getEndJobCommitFailureFile(conf,userName,jobId); FileSystem fs=FileSystem.get(conf); fs.create(start).close(); fs.create(end).close(); ContainerId containerId=ConverterUtils.toContainerId(containerIdStr); MRAppMaster appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,false); boolean caught=false; try { MRAppMaster.initAndStartAppMaster(appMaster,conf,userName); } catch ( IOException e) { LOG.info("Caught expected Exception",e); caught=true; } assertTrue(caught); assertTrue(appMaster.errorHappenedShutDown); assertEquals(JobStateInternal.FAILED,appMaster.forcedState); appMaster.stop(); verifyFailedStatus((MRAppMasterTest)appMaster,"FAILED"); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The staging directory has been removed out from under the AM. Startup
 * must fail with an IOException and force the job into the ERROR state.
 */
@Test
public void testMRAppMasterMissingStaging() throws IOException, InterruptedException {
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String contIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
  // Delete the staging area entirely so the AM cannot find it.
  File dir = new File(stagingDir);
  if (dir.exists()) {
    FileUtils.deleteDirectory(dir);
  }
  ContainerId contId = ConverterUtils.toContainerId(contIdStr);
  MRAppMaster master = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
      System.currentTimeMillis(), false, false);
  boolean sawException = false;
  try {
    MRAppMaster.initAndStartAppMaster(master, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawException = true;
  }
  assertTrue(sawException);
  assertTrue(master.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, master.forcedState);
  master.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Both the start-commit and end-commit-success marker files already exist,
 * so the restarted AM must conclude the previous commit succeeded and force
 * the job into the SUCCEEDED state.
 */
@Test
public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String contIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Pre-create both commit markers: commit started AND ended successfully.
  Path startFile = MRApps.getStartJobCommitFile(jobConf, user, jobId);
  Path endFile = MRApps.getEndJobCommitSuccessFile(jobConf, user, jobId);
  FileSystem fileSys = FileSystem.get(jobConf);
  fileSys.create(startFile).close();
  fileSys.create(endFile).close();
  ContainerId contId = ConverterUtils.toContainerId(contIdStr);
  MRAppMaster master = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
      System.currentTimeMillis(), false, false);
  boolean sawException = false;
  try {
    MRAppMaster.initAndStartAppMaster(master, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawException = true;
  }
  assertTrue(sawException);
  assertTrue(master.errorHappenedShutDown);
  assertEquals(JobStateInternal.SUCCEEDED, master.forcedState);
  master.stop();
  verifyFailedStatus((MRAppMasterTest) master, "SUCCEEDED");
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies credential propagation into a started MRAppMaster: the tokens
 * and secret key written to the file referenced by
 * HADOOP_TOKEN_FILE_LOCATION must show up in the AM's credentials, the job
 * conf's credentials, and the AM's UGI (which additionally carries the
 * AMRM token).
 */
@Test
public void testMRAppMasterCredentials() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  // Build a credentials set: one ordinary token, the AMRM token, one key.
  Credentials credentials = new Credentials();
  byte[] identifier = "MyIdentifier".getBytes();
  byte[] password = "MyPassword".getBytes();
  Text kind = new Text("MyTokenKind");
  Text service = new Text("host:port");
  Token myToken = new Token(identifier, password, kind, service);
  Text tokenAlias = new Text("myToken");
  credentials.addToken(tokenAlias, myToken);
  Text appTokenService = new Text("localhost:0");
  Token appToken = new Token(identifier, password,
      AMRMTokenIdentifier.KIND_NAME, appTokenService);
  credentials.addToken(appTokenService, appToken);
  Text keyAlias = new Text("mySecretKeyAlias");
  credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
  Token storedToken = credentials.getToken(tokenAlias);
  JobConf conf = new JobConf();
  // Write the credentials to a file and point the AM at it through the
  // HADOOP_TOKEN_FILE_LOCATION environment variable (set via reflection).
  Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
  Map newEnv = new HashMap();
  newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,
      tokenFilePath.toUri().getPath());
  setNewEnvironmentHack(newEnv);
  credentials.writeTokenStorageFile(tokenFilePath, conf);
  ApplicationId appId = ApplicationId.newInstance(12345, 56);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newInstance(applicationAttemptId, 546);
  String userName = UserGroupInformation.getCurrentUser().getShortUserName();
  // The AM expects its staging dir to exist before starting.
  File stagingDir = new File(MRApps.getStagingAreaDir(conf, userName).toString());
  stagingDir.mkdirs();
  // Reset the login user so the AM rebuilds its UGI from the token file.
  UserGroupInformation.setLoginUser(null);
  MRAppMasterTest appMaster = new MRAppMasterTest(applicationAttemptId, containerId,
      "host", -1, -1, System.currentTimeMillis(), false, true);
  MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  // AM credentials: the ordinary token and the secret key (AMRM token is
  // kept only in the UGI, checked below).
  Credentials appMasterCreds = appMaster.getCredentials();
  Assert.assertNotNull(appMasterCreds);
  Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
  Assert.assertEquals(1, appMasterCreds.numberOfTokens());
  Token usedToken = appMasterCreds.getToken(tokenAlias);
  Assert.assertNotNull(usedToken);
  Assert.assertEquals(storedToken, usedToken);
  byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
  Assert.assertNotNull(usedKey);
  Assert.assertEquals("mySecretKey", new String(usedKey));
  // The job conf must carry the same credentials.
  Credentials confCredentials = conf.getCredentials();
  Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
  Assert.assertEquals(1, confCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
  Assert.assertEquals("mySecretKey", new String(confCredentials.getSecretKey(keyAlias)));
  // The UGI additionally holds the AMRM token (hence 2 tokens).
  Credentials ugiCredentials = appMaster.getUgi().getCredentials();
  Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
  Assert.assertEquals(2, ugiCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
  Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
  Assert.assertEquals("mySecretKey", new String(ugiCredentials.getSecretKey(keyAlias)));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Only the start-commit marker exists (a commit was in flight when the
 * previous AM died), so the restarted AM cannot tell whether the commit
 * completed and must force the job into the ERROR state.
 */
@Test
public void testMRAppMasterMidLock() throws IOException, InterruptedException {
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String contIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Only the start marker: the previous commit never recorded an outcome.
  Path startFile = MRApps.getStartJobCommitFile(jobConf, user, jobId);
  FileSystem fileSys = FileSystem.get(jobConf);
  fileSys.create(startFile).close();
  ContainerId contId = ConverterUtils.toContainerId(contIdStr);
  MRAppMaster master = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
      System.currentTimeMillis(), false, false);
  boolean sawException = false;
  try {
    MRAppMaster.initAndStartAppMaster(master, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawException = true;
  }
  assertTrue(sawException);
  assertTrue(master.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, master.forcedState);
  master.stop();
  verifyFailedStatus((MRAppMasterTest) master, "FAILED");
}

Class: org.apache.hadoop.mapreduce.v2.app.TestMRClientService

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * End-to-end exercise of the MRClientProtocol RPC surface: submits a job,
 * pushes a diagnostic and a status update for the running attempt, then
 * queries counters, job/task/attempt reports, completion events and
 * diagnostics through an RPC proxy, verifying each response is populated.
 *
 * Fix: removed the unused local {@code diagnostic2}, which was declared
 * but never sent or asserted on.
 */
@Test
public void test() throws Exception {
  MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  // Feed a diagnostic message and a progress/status update into the AM so
  // the subsequent report queries have data to return.
  String diagnostic1 = "Diagnostic1";
  app.getContext().getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(), diagnostic1));
  TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
  taskAttemptStatus.id = attempt.getID();
  taskAttemptStatus.progress = 0.5f;
  taskAttemptStatus.stateString = "RUNNING";
  taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
  taskAttemptStatus.phase = Phase.MAP;
  app.getContext().getEventHandler().handle(
      new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));
  // Talk to the AM through the real RPC client service.
  YarnRPC rpc = YarnRPC.create(conf);
  MRClientProtocol proxy = (MRClientProtocol) rpc.getProxy(
      MRClientProtocol.class, app.clientService.getBindAddress(), conf);
  GetCountersRequest gcRequest = recordFactory.newRecordInstance(GetCountersRequest.class);
  gcRequest.setJobId(job.getID());
  Assert.assertNotNull("Counters is null", proxy.getCounters(gcRequest).getCounters());
  GetJobReportRequest gjrRequest = recordFactory.newRecordInstance(GetJobReportRequest.class);
  gjrRequest.setJobId(job.getID());
  JobReport jr = proxy.getJobReport(gjrRequest).getJobReport();
  verifyJobReport(jr);
  GetTaskAttemptCompletionEventsRequest gtaceRequest =
      recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
  gtaceRequest.setJobId(job.getID());
  gtaceRequest.setFromEventId(0);
  gtaceRequest.setMaxEvents(10);
  Assert.assertNotNull("TaskCompletionEvents is null",
      proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
  GetDiagnosticsRequest gdRequest = recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
  gdRequest.setTaskAttemptId(attempt.getID());
  Assert.assertNotNull("Diagnostics is null",
      proxy.getDiagnostics(gdRequest).getDiagnosticsList());
  GetTaskAttemptReportRequest gtarRequest =
      recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
  gtarRequest.setTaskAttemptId(attempt.getID());
  TaskAttemptReport tar = proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
  verifyTaskAttemptReport(tar);
  GetTaskReportRequest gtrRequest = recordFactory.newRecordInstance(GetTaskReportRequest.class);
  gtrRequest.setTaskId(task.getID());
  Assert.assertNotNull("TaskReport is null", proxy.getTaskReport(gtrRequest).getTaskReport());
  GetTaskReportsRequest gtreportsRequest =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  gtreportsRequest.setJobId(job.getID());
  gtreportsRequest.setTaskType(TaskType.MAP);
  Assert.assertNotNull("TaskReports for map is null",
      proxy.getTaskReports(gtreportsRequest).getTaskReportList());
  gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  gtreportsRequest.setJobId(job.getID());
  gtreportsRequest.setTaskType(TaskType.REDUCE);
  Assert.assertNotNull("TaskReports for reduce is null",
      proxy.getTaskReports(gtreportsRequest).getTaskReportList());
  // The diagnostic fed in above must round-trip through the proxy.
  List diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
  Assert.assertEquals("Num diagnostics not correct", 1, diag.size());
  Assert.assertEquals("Diag 1 not correct", diagnostic1, diag.get(0).toString());
  TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
  Assert.assertEquals("Num diagnostics not correct", 1, taskReport.getDiagnosticsCount());
  // Let the lone task finish so the job can complete.
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      task.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A user granted only VIEW_JOB access must be able to view the job while
 * every modify operation (kill job/task/attempt, fail attempt) issued
 * through the client protocol is rejected with AccessControlException.
 *
 * Fixes: the failTaskAttempt failure message was a copy-paste of the
 * kill-attempt one ("killed" -> "failed"); the raw
 * {@code PrivilegedExceptionAction} is now parameterized.
 */
@Test
public void testViewAclOnlyCannotModify() throws Exception {
  final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
  final Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  UserGroupInformation viewOnlyUser =
      UserGroupInformation.createUserForTesting("viewonlyuser", new String[]{});
  Assert.assertTrue("viewonlyuser cannot view job",
      job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
  Assert.assertFalse("viewonlyuser can modify job",
      job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
  // Open a client proxy as the view-only user.
  MRClientProtocol client = viewOnlyUser.doAs(
      new PrivilegedExceptionAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() throws Exception {
          YarnRPC rpc = YarnRPC.create(conf);
          return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
              app.clientService.getBindAddress(), conf);
        }
      });
  // Every modify call below must throw; the catch blocks are the expected
  // outcome, so they are intentionally empty.
  KillJobRequest killJobRequest = recordFactory.newRecordInstance(KillJobRequest.class);
  killJobRequest.setJobId(app.getJobId());
  try {
    client.killJob(killJobRequest);
    fail("viewonlyuser killed job");
  } catch (AccessControlException e) {
    // expected
  }
  KillTaskRequest killTaskRequest = recordFactory.newRecordInstance(KillTaskRequest.class);
  killTaskRequest.setTaskId(task.getID());
  try {
    client.killTask(killTaskRequest);
    fail("viewonlyuser killed task");
  } catch (AccessControlException e) {
    // expected
  }
  KillTaskAttemptRequest killTaskAttemptRequest =
      recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
  killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.killTaskAttempt(killTaskAttemptRequest);
    fail("viewonlyuser killed task attempt");
  } catch (AccessControlException e) {
    // expected
  }
  FailTaskAttemptRequest failTaskAttemptRequest =
      recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
  failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.failTaskAttempt(failTaskAttemptRequest);
    fail("viewonlyuser failed task attempt");
  } catch (AccessControlException e) {
    // expected
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.TestRecovery

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * AM with 2 maps and 1 reduce. A speculative attempt is added for the 1st
 * map, its container launch is simulated, and then the original attempt
 * succeeds. The AM crashes after the first task finishes and recovers
 * completely in a second generation, which finishes the remaining tasks
 * and must report the recovered start/finish times and both AMInfos.
 * (NOTE(review): the previous javadoc was copy-pasted from testCrashed
 * and described a different scenario.)
 * @throws Exception
 */
@Test
public void testSpeculative() throws Exception {
  int runCount = 0;
  long am1StartTimeEst = System.currentTimeMillis();
  MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  long jobStartTime = job.getReport().getStartTime();
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task reduceTask = it.next();
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  // Request a speculative attempt for map 1 and poll until it appears.
  app.getContext().getEventHandler().handle(
      new TaskEvent(mapTask1.getID(), TaskEventType.T_ADD_SPEC_ATTEMPT));
  int timeOut = 0;
  while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
    Thread.sleep(1000);
    LOG.info("Waiting for next attempt to start");
  }
  Iterator t1it = mapTask1.getAttempts().values().iterator();
  TaskAttempt task1Attempt1 = t1it.next();
  TaskAttempt task1Attempt2 = t1it.next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  ContainerId t1a2contId = task1Attempt2.getAssignedContainerID();
  LOG.info(t1a2contId.toString());
  LOG.info(task1Attempt1.getID().toString());
  LOG.info(task1Attempt2.getID().toString());
  // Simulate the container launch for the speculative attempt.
  app.getContext().getEventHandler().handle(
      new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(), runCount));
  app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
  app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  Assert.assertEquals("Reduce Task state not correct", TaskState.RUNNING,
      reduceTask.getReport().getTaskState());
  // Finish the original attempt; map 1 succeeds.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(task1Attempt1, TaskAttemptState.SUCCEEDED);
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  long task1StartTime = mapTask1.getReport().getStartTime();
  long task1FinishTime = mapTask1.getReport().getFinishTime();
  // Crash the first AM generation.
  app.stop();
  // Second generation with recovery enabled; map 1 must be recovered.
  long am2StartTimeEst = System.currentTimeMillis();
  app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask1 = it.next();
  mapTask2 = it.next();
  reduceTask = it.next();
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  app.waitForState(mapTask2, TaskState.RUNNING);
  task2Attempt = mapTask2.getAttempts().values().iterator().next();
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      mapTask2.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(mapTask2, TaskState.SUCCEEDED);
  app.waitForState(reduceTask, TaskState.RUNNING);
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      reduceTask.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
  // Times recovered from history must match the first generation.
  Assert.assertEquals("Job Start time not correct", jobStartTime, job.getReport().getStartTime());
  Assert.assertEquals("Task Start time not correct", task1StartTime, mapTask1.getReport().getStartTime());
  Assert.assertEquals("Task Finish time not correct", task1FinishTime, mapTask1.getReport().getFinishTime());
  // One AMInfo per generation, with consistent ids and NM endpoints.
  Assert.assertEquals(2, job.getAMInfos().size());
  int attemptNum = 1;
  for (AMInfo amInfo : job.getAMInfos()) {
    Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
  }
  long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
  long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
  Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
  Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
 * completely disappears because of failed launch, one attempt gets killed and
 * one attempt succeeds. AM crashes after the first tasks finishes and
 * recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test
public void testCrashed() throws Exception {
  int runCount = 0;
  long am1StartTimeEst = System.currentTimeMillis();
  MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  long jobStartTime = job.getReport().getStartTime();
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task reduceTask = it.next();
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  Assert.assertEquals("Reduce Task state not correct", TaskState.RUNNING,
      reduceTask.getReport().getTaskState());
  // Attempt 1 of map 1: fail it outright.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_FAILMSG));
  app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
  int timeOut = 0;
  while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(2, mapTask1.getAttempts().size());
  Iterator itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  TaskAttempt task1Attempt2 = itr.next();
  // Attempt 2: its container launch fails.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(task1Attempt2.getID(), TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
  app.waitForState(task1Attempt2, TaskAttemptState.FAILED);
  timeOut = 0;
  while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(3, mapTask1.getAttempts().size());
  itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  itr.next();
  TaskAttempt task1Attempt3 = itr.next();
  app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
  // Attempt 3: kill it.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(task1Attempt3.getID(), TaskAttemptEventType.TA_KILL));
  app.waitForState(task1Attempt3, TaskAttemptState.KILLED);
  timeOut = 0;
  while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(4, mapTask1.getAttempts().size());
  itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  itr.next();
  itr.next();
  TaskAttempt task1Attempt4 = itr.next();
  app.waitForState(task1Attempt4, TaskAttemptState.RUNNING);
  // Attempt 4: succeeds, completing map 1.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(task1Attempt4.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  long task1StartTime = mapTask1.getReport().getStartTime();
  long task1FinishTime = mapTask1.getReport().getFinishTime();
  // Crash the first AM generation.
  app.stop();
  // Second generation with recovery enabled.
  long am2StartTimeEst = System.currentTimeMillis();
  app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask1 = it.next();
  mapTask2 = it.next();
  reduceTask = it.next();
  // Map 1 must be recovered as SUCCEEDED without rerunning.
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  app.waitForState(mapTask2, TaskState.RUNNING);
  task2Attempt = mapTask2.getAttempts().values().iterator().next();
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      mapTask2.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(mapTask2, TaskState.SUCCEEDED);
  app.waitForState(reduceTask, TaskState.RUNNING);
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(
      reduceTask.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
  // Times recovered from history must match the first generation.
  Assert.assertEquals("Job Start time not correct", jobStartTime, job.getReport().getStartTime());
  Assert.assertEquals("Task Start time not correct", task1StartTime, mapTask1.getReport().getStartTime());
  Assert.assertEquals("Task Finish time not correct", task1FinishTime, mapTask1.getReport().getFinishTime());
  // One AMInfo per generation, with consistent ids and NM endpoints.
  Assert.assertEquals(2, job.getAMInfos().size());
  int attemptNum = 1;
  for (AMInfo amInfo : job.getAMInfos()) {
    Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
  }
  long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
  long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
  Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
  Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}

Class: org.apache.hadoop.mapreduce.v2.app.TestStagingCleanup

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the AM is on its last retry and the job shuts down in RUNNING
 * state, the staging directory must be deleted.
 *
 * Fix: {@code assertEquals(true, ...)} replaced with the idiomatic
 * {@code assertTrue(...)}.
 */
@Test
public void testDeletionofStaging() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  // Sanity: the scenario relies on more than one AM attempt being allowed.
  Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING,
      MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
  appMaster.init(conf);
  appMaster.start();
  appMaster.shutDownJob();
  Assert.assertTrue(((TestMRApp) appMaster).getTestIsLastAMRetry());
  // Staging must have been removed exactly once.
  verify(fs).delete(stagingJobPath, true);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * On a REBOOT shutdown the AM is not on its last retry, so the staging
 * directory must NOT be deleted.
 *
 * Fix: {@code assertEquals(false, ...)} replaced with the idiomatic
 * {@code assertFalse(...)}.
 */
@Test(timeout = 30000)
public void testNoDeletionofStagingOnReboot() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  // Sanity: the scenario relies on more than one AM attempt being allowed.
  Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.REBOOT,
      MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
  appMaster.init(conf);
  appMaster.start();
  appMaster.shutDownJob();
  Assert.assertFalse(((TestMRApp) appMaster).getTestIsLastAMRetry());
  // Staging must never have been deleted.
  verify(fs, times(0)).delete(stagingJobPath, true);
}

Class: org.apache.hadoop.mapreduce.v2.app.commit.TestCommitterEventHandler

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Happy-path commit: the handler must write the start and success marker
 * files (and no failure marker), emit a JobCommitCompletedEvent, and
 * invoke the committer's commitJob exactly once.
 */
@Test
public void testBasic() throws Exception {
  AppContext mockContext = mock(AppContext.class);
  OutputCommitter mockCommitter = mock(OutputCommitter.class);
  Clock mockClock = mock(Clock.class);
  CommitterEventHandler handler = new CommitterEventHandler(mockContext, mockCommitter,
      new TestingRMHeartbeatHandler());
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  JobContext mockJobContext = mock(JobContext.class);
  ApplicationAttemptId attemptid =
      ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
  // Captures the event the handler emits so we can inspect it below.
  WaitForItHandler waitForItHandler = new WaitForItHandler();
  when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
  when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
  when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
  when(mockContext.getClock()).thenReturn(mockClock);
  handler.init(conf);
  handler.start();
  try {
    handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
    // The handler must report successful completion.
    Event e = waitForItHandler.getAndClearEvent();
    assertNotNull(e);
    assertTrue(e instanceof JobCommitCompletedEvent);
    // Marker files: start + success present, failure absent.
    FileSystem fs = FileSystem.get(conf);
    assertTrue(startCommitFile.toString(), fs.exists(startCommitFile));
    assertTrue(endCommitSuccessFile.toString(), fs.exists(endCommitSuccessFile));
    assertFalse(endCommitFailureFile.toString(), fs.exists(endCommitFailureFile));
    verify(mockCommitter).commitJob(any(JobContext.class));
  } finally {
    handler.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Failing commit: with commitJob stubbed to throw, the handler must write
 * the start and failure marker files (and no success marker) and emit a
 * JobCommitFailedEvent.
 */
@Test
public void testFailure() throws Exception {
  AppContext mockContext = mock(AppContext.class);
  OutputCommitter mockCommitter = mock(OutputCommitter.class);
  Clock mockClock = mock(Clock.class);
  CommitterEventHandler handler = new CommitterEventHandler(mockContext, mockCommitter,
      new TestingRMHeartbeatHandler());
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  JobContext mockJobContext = mock(JobContext.class);
  ApplicationAttemptId attemptid =
      ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
  // Captures the event the handler emits so we can inspect it below.
  WaitForItHandler waitForItHandler = new WaitForItHandler();
  when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
  when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
  when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
  when(mockContext.getClock()).thenReturn(mockClock);
  // Force the commit itself to fail.
  doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter)
      .commitJob(any(JobContext.class));
  handler.init(conf);
  handler.start();
  try {
    handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
    // The handler must report a failed commit.
    Event e = waitForItHandler.getAndClearEvent();
    assertNotNull(e);
    assertTrue(e instanceof JobCommitFailedEvent);
    // Marker files: start + failure present, success absent.
    FileSystem fs = FileSystem.get(conf);
    assertTrue(fs.exists(startCommitFile));
    assertFalse(fs.exists(endCommitSuccessFile));
    assertTrue(fs.exists(endCommitFailureFile));
    verify(mockCommitter).commitJob(any(JobContext.class));
  } finally {
    handler.stop();
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Runs a job configured with zero reduces and (implicitly) zero tasks through
 * INIT and START, expecting it to go straight to SUCCEEDED. Workflow-related
 * configuration (id, name, node name, adjacency entries, tags) is set up front
 * and a JobSubmittedEventHandler registered on the dispatcher checks that the
 * submitted event carries exactly those attributes; getAssertValue() at the
 * end reports whether they matched. The dispatcher and commit handler are
 * started before the job is driven and stopped afterwards.
 */
@Test public void testJobNoTasks(){ Configuration conf=new Configuration(); conf.setInt(MRJobConfig.NUM_REDUCES,0); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); conf.set(MRJobConfig.WORKFLOW_ID,"testId"); conf.set(MRJobConfig.WORKFLOW_NAME,"testName"); conf.set(MRJobConfig.WORKFLOW_NODE_NAME,"testNodeName"); conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key1","value1"); conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key2","value2"); conf.set(MRJobConfig.WORKFLOW_TAGS,"tag1,tag2"); AsyncDispatcher dispatcher=new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); OutputCommitter committer=mock(OutputCommitter.class); CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer); commitHandler.init(conf); commitHandler.start(); JobSubmittedEventHandler jseHandler=new JobSubmittedEventHandler("testId","testName","testNodeName","\"key2\"=\"value2\" \"key1\"=\"value1\" ","tag1,tag2"); dispatcher.register(EventType.class,jseHandler); JobImpl job=createStubbedJob(conf,dispatcher,0,null); job.handle(new JobEvent(job.getID(),JobEventType.JOB_INIT)); assertJobState(job,JobStateInternal.INITED); job.handle(new JobStartEvent(job.getID())); assertJobState(job,JobStateInternal.SUCCEEDED); dispatcher.stop(); commitHandler.stop(); try { Assert.assertTrue(jseHandler.getAssertValue()); } catch ( InterruptedException e) { Assert.fail("Workflow related attributes are not tested properly"); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * The job report must expose diagnostic text delivered through a
 * JobDiagnosticsUpdateEvent — both for a freshly constructed job and for a
 * job that has already been handed a JOB_KILL event.
 */
@Test
public void testReportDiagnostics() throws Exception {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  final String diagMsg = "some diagnostic message";
  final JobDiagnosticsUpdateEvent diagUpdateEvent =
      new JobDiagnosticsUpdateEvent(jobId, diagMsg);
  MRAppMetrics metrics = MRAppMetrics.create();
  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);

  // Case 1: diagnostics arrive on a brand-new job.
  JobImpl job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class),
      new Configuration(), mock(EventHandler.class), null,
      mock(JobTokenSecretManager.class), null, new SystemClock(), null,
      metrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(diagUpdateEvent);
  String reported = job.getReport().getDiagnostics();
  Assert.assertNotNull(reported);
  Assert.assertTrue(reported.contains(diagMsg));

  // Case 2: diagnostics arrive after the job was told to kill itself.
  job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class),
      new Configuration(), mock(EventHandler.class), null,
      mock(JobTokenSecretManager.class), null, new SystemClock(), null,
      metrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  job.handle(diagUpdateEvent);
  reported = job.getReport().getDiagnostics();
  Assert.assertNotNull(reported);
  Assert.assertTrue(reported.contains(diagMsg));
}

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestMapReduceChildJVM

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Submits a one-map job in cross-platform mode and compares the full child
 * JVM command line, character for character, against the expected launch
 * string (JAVA_HOME-relative java binary, JVM flags, log4j settings, YarnChild
 * main class, host/port/attempt arguments and stdout/stderr redirection).
 * Also verifies the child environment: HADOOP_ROOT_LOGGER defaults to
 * "INFO,console" and HADOOP_CLIENT_OPTS is present but empty.
 * NOTE(review): the expected string is position-sensitive; keep it byte-exact.
 */
@Test(timeout=30000) public void testCommandLine() throws Exception { MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true); Configuration conf=new Configuration(); conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true); Job job=app.submit(conf); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine); Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER")); Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS")); Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks propagation of user-specified environment into the map task child:
 * first run sets HADOOP_CLIENT_OPTS=test via MAPRED_MAP_TASK_ENV and a WARN
 * map log level, expecting HADOOP_ROOT_LOGGER=WARN,console and the custom
 * HADOOP_CLIENT_OPTS in the child env; a second run overrides
 * HADOOP_ROOT_LOGGER directly through MAPRED_MAP_TASK_ENV and expects the
 * user value ("trace") to win over the derived default.
 */
@Test public void testEnvironmentVariables() throws Exception { MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true); Configuration conf=new Configuration(); conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_CLIENT_OPTS=test"); conf.setStrings(MRJobConfig.MAP_LOG_LEVEL,"WARN"); conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,false); Job job=app.submit(conf); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER")); Assert.assertEquals("WARN,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS")); Assert.assertEquals("test",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); app=new MyMRApp(1,0,true,this.getClass().getName(),true); conf=new Configuration(); conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_ROOT_LOGGER=trace"); job=app.submit(conf); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER")); Assert.assertEquals("trace",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); }

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestShuffleProvider

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Configures two auxiliary shuffle services (TestShuffleHandler1/2) both as NM
 * aux services and as the job's shuffle providers, builds the container launch
 * context for a map attempt, and verifies the launch context's service-data
 * map carries an entry for each configured handler plus the default shuffle
 * service (three entries total).
 *
 * Fix: replaced assertTrue(size() == 3) with assertEquals so a failure report
 * shows the actual number of services instead of just "false".
 */
@Test
public void testShuffleProviders() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);
  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  // Stub the local filesystem so no real FS access happens.
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  // Register both test shuffle handlers as NM aux services...
  jobConf.set(YarnConfiguration.NM_AUX_SERVICES,
      TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + ","
      + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
  String serviceName = TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
  String serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
  jobConf.set(serviceStr, TestShuffleHandler1.class.getName());
  serviceName = TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
  serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
  jobConf.set(serviceStr, TestShuffleHandler2.class.getName());
  // ...and as the job's requested shuffle providers.
  jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,
      TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + ","
      + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
  Credentials credentials = new Credentials();
  Token jobToken = new Token(("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, mock(TaskSplitMetaInfo.class), jobConf, taListener,
      jobToken, credentials, new SystemClock(), null);
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
  ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(
      null, jobConf, jobToken, taImpl.createRemoteTask(),
      TypeConverter.fromYarn(jobId), mock(WrappedJvmID.class), taListener,
      credentials);
  Map serviceDataMap = launchCtx.getServiceData();
  Assert.assertNotNull("TestShuffleHandler1 is missing",
      serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
  Assert.assertNotNull("TestShuffleHandler2 is missing",
      serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
  // Two test handlers + the default shuffle service.
  Assert.assertEquals("mismatch number of services in map",
      3, serviceDataMap.size());
}

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt

BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises host resolution in RequestContainerTransition: a split whose first
 * location is an IP ("192.168.1.1") is stubbed (via a Mockito spy) to resolve
 * to "host1", the transition is fired, and the test verifies resolveHost was
 * called for the IP and that the resulting ContainerRequestEvent (the second
 * event handled) requests exactly the resolved host set {host1, host2, host3}.
 * NOTE(review): stubbing happens on the spy before dataLocalHosts is
 * recomputed — this ordering matters for the Mockito interactions.
 */
@Test public void testHostResolveAttempt() throws Exception { TaskAttemptImpl.RequestContainerTransition rct=new TaskAttemptImpl.RequestContainerTransition(false); EventHandler eventHandler=mock(EventHandler.class); String[] hosts=new String[3]; hosts[0]="192.168.1.1"; hosts[1]="host2"; hosts[2]="host3"; TaskSplitMetaInfo splitInfo=new TaskSplitMetaInfo(hosts,0,128 * 1024 * 1024l); TaskAttemptImpl mockTaskAttempt=createMapTaskAttemptImplForTest(eventHandler,splitInfo); TaskAttemptImpl spyTa=spy(mockTaskAttempt); when(spyTa.resolveHost(hosts[0])).thenReturn("host1"); spyTa.dataLocalHosts=spyTa.resolveHosts(splitInfo.getLocations()); TaskAttemptEvent mockTAEvent=mock(TaskAttemptEvent.class); rct.transition(spyTa,mockTAEvent); verify(spyTa).resolveHost(hosts[0]); ArgumentCaptor arg=ArgumentCaptor.forClass(Event.class); verify(eventHandler,times(2)).handle(arg.capture()); if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) { Assert.fail("Second Event not of type ContainerRequestEvent"); } Map expected=new HashMap(); expected.put("host1",true); expected.put("host2",true); expected.put("host3",true); ContainerRequestEvent cre=(ContainerRequestEvent)arg.getAllValues().get(1); String[] requestedHosts=cre.getHosts(); for ( String h : requestedHosts) { expected.remove(h); } assertEquals(0,expected.size()); }

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A container request for a map split with three data-local hosts should
 * resolve to exactly one rack: the transition emits two events and the second
 * one, a ContainerRequestEvent, must carry a single requested rack.
 */
@Test
public void testSingleRackRequest() throws Exception {
  TaskAttemptImpl.RequestContainerTransition transition =
      new TaskAttemptImpl.RequestContainerTransition(false);
  EventHandler handler = mock(EventHandler.class);
  String[] hosts = {"host1", "host2", "host3"};
  TaskSplitMetaInfo splitInfo = new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
  TaskAttemptImpl attempt = createMapTaskAttemptImplForTest(handler, splitInfo);
  transition.transition(attempt, mock(TaskAttemptEvent.class));
  // The transition fires two events; the second must be the container request.
  ArgumentCaptor captor = ArgumentCaptor.forClass(Event.class);
  verify(handler, times(2)).handle(captor.capture());
  Object second = captor.getAllValues().get(1);
  if (!(second instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  String[] requestedRacks = ((ContainerRequestEvent) second).getRacks();
  assertEquals(1, requestedRacks.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a succeeded map attempt keeps its original finish time when a
 * TA_TOO_MANY_FETCH_FAILURE event later flips it to FAILED: the finish time
 * recorded at success must be unchanged after the failure transition.
 *
 * Fix: assertEquals calls compared (message, actual, expected); swapped to
 * JUnit's (message, expected, actual) order so failure output is not inverted.
 */
@Test
public void testFetchFailureAttemptFinishTime() throws Exception {
  // Identifiers for one map attempt of job 1.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  // Stub the filesystem so no real FS access happens.
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, mock(Token.class),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
      mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  assertTrue("Task Attempt finish time is not greater than 0",
      taImpl.getFinishTime() > 0);
  Long finishTime = taImpl.getFinishTime();
  // Let the clock tick so an incorrectly rewritten finish time is detectable.
  Thread.sleep(5);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in Too Many Fetch Failure state",
      TaskAttemptState.FAILED, taImpl.getState());
  assertEquals("After TA_TOO_MANY_FETCH_FAILURE,"
      + " Task attempt finish time is not the same ",
      finishTime, Long.valueOf(taImpl.getFinishTime()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A succeeded map attempt that receives TA_TOO_MANY_FETCH_FAILURE goes to
 * FAILED; a second TA_TOO_MANY_FETCH_FAILURE on the already-failed attempt
 * must be ignored gracefully (still FAILED, no InternalError raised by the
 * event handler).
 *
 * Fix: assertEquals calls compared (message, actual, expected); swapped to
 * JUnit's (message, expected, actual) order so failure output is not inverted.
 */
@Test
public void testDoubleTooManyFetchFailure() throws Exception {
  // Identifiers for one map attempt of job 1.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  // Stub the filesystem so no real FS access happens.
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
      mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state",
      TaskAttemptState.FAILED, taImpl.getState());
  // A second fetch-failure event on an already-failed attempt must be a no-op.
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state, still",
      TaskAttemptState.FAILED, taImpl.getState());
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Sends TA_KILL to an assigned-but-not-launched attempt, then container
 * cleanup and a TA_CONTAINER_LAUNCH_FAILED event, and verifies the state
 * machine absorbs the launch failure during the kill sequence without raising
 * an InternalError. Also checks the attempt was assigned NODE_LOCAL, since
 * the container's node id matches the split's only location (127.0.0.1).
 * Note the attempt is built with a null AppContext — this path must not
 * touch the app context.
 */
@Test public void testLaunchFailedWhileKilling() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),null); NodeId nid=NodeId.newInstance("127.0.0.1",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL)); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED)); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)); assertFalse(eventHandler.internalError); assertEquals("Task attempt is not assigned on the local node",Locality.NODE_LOCAL,taImpl.getLocality()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A succeeded map attempt that is killed (TA_KILL) moves to KILLED; a
 * subsequent TA_TOO_MANY_FETCH_FAILURE must leave it KILLED and must not
 * trigger an InternalError in the event handler.
 *
 * Fix: assertEquals calls compared (message, actual, expected); swapped to
 * JUnit's (message, expected, actual) order so failure output is not inverted.
 */
@Test
public void testTooManyFetchFailureAfterKill() throws Exception {
  // Identifiers for one map attempt of job 1.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  // Stub the filesystem so no real FS access happens.
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, mock(Token.class),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
      mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertEquals("Task attempt is not in KILLED state",
      TaskAttemptState.KILLED, taImpl.getState());
  // Fetch failures reported after the kill must not resurrect the attempt.
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in KILLED state, still",
      TaskAttemptState.KILLED, taImpl.getState());
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Sends TA_CONTAINER_CLEANED to an attempt that is in COMMIT_PENDING and
 * verifies the state machine handles it without raising an InternalError.
 * The split has no locations, so the attempt's locality must be OFF_SWITCH.
 *
 * Fix: the COMMIT_PENDING assertEquals compared (message, actual, expected);
 * swapped to JUnit's (message, expected, actual) order.
 */
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
  // Identifiers for one map attempt of job 1.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  // Stub the filesystem so no real FS access happens.
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  // No split locations: the attempt cannot be node- or rack-local.
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[]{});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Drive the attempt into COMMIT_PENDING.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
      mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_COMMIT_PENDING));
  assertEquals("Task attempt is not in commit pending state",
      TaskAttemptState.COMMIT_PENDING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally",
      Locality.OFF_SWITCH, taImpl.getLocality());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testContainerKillWhileCommitPending() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_COMMIT_PENDING)); assertEquals("Task should be in COMMIT_PENDING state",TaskAttemptStateInternal.COMMIT_PENDING,taImpl.getInternalState()); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError); assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testContainerCleanedWhileRunning() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED)); assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError); assertEquals("Task attempt is not assigned on the local rack",Locality.RACK_LOCAL,taImpl.getLocality()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testContainerKillWhileRunning() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError); assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState()); }

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttemptContainerRequest

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the container launch context built for a task attempt
 * carries through the application ACLs, every token in the job credentials,
 * and the secret keys, all unchanged after serialization round-trip.
 */
@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  // View-ACL granted to a different user; must survive into the launch context.
  Map acls = new HashMap(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);
  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  // Security must be on so the credential-related parts of the context are
  // actually populated.
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(jobConf);
  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token jobToken = new Token(("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, mock(TaskSplitMetaInfo.class), jobConf, taListener,
      jobToken, credentials, new SystemClock(), null);
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
  ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(
      acls, jobConf, jobToken, taImpl.createRemoteTask(),
      TypeConverter.fromYarn(jobId), mock(WrappedJvmID.class), taListener,
      credentials);
  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  // Deserialize the credentials packed into the launch context and compare
  // them against what was put in.
  Credentials launchCredentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);
  // Every token from the original credentials must round-trip unchanged.
  for ( Token token : credentials.getAllTokens()) {
    Token launchToken = launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }
  // Secret keys must round-trip as well.
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch",
      Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}

Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskImpl

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A task attempt that fails while in COMMIT_PENDING must lose its permission
 * to commit; the replacement attempt then gets commit permission and drives
 * the task to SUCCEEDED.
 */
@Test
public void testFailureDuringTaskAttemptCommit(){
  mockTask = createMockTask(TaskType.MAP);
  TaskId taskId = getNewTaskID();
  scheduleTaskAttempt(taskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());
  // First attempt reaches COMMIT_PENDING and asks to commit...
  updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  // ...then fails, which should spawn a second attempt.
  updateLastAttemptState(TaskAttemptState.FAILED);
  failRunningTaskAttempt(getLastAttempt().getAttemptId());
  assertEquals(2, taskAttempts.size());
  // Second attempt succeeds and commits.
  updateLastAttemptState(TaskAttemptState.SUCCEEDED);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
      TaskEventType.T_ATTEMPT_SUCCEEDED));
  // Commit permission must have moved from the failed attempt to the new one.
  assertFalse("First attempt should not commit",
      mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
  assertTrue("Second attempt should commit",
      mockTask.canCommit(getLastAttempt().getAttemptId()));
  assertTaskSucceededState();
}

Class: org.apache.hadoop.mapreduce.v2.app.launcher.TestContainerLauncher

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The container-launcher thread pool must grow one thread per event up to the
 * configured MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT (12 here) and never
 * beyond it, even when more events than the limit are queued.
 */
@Test(timeout=5000)
public void testPoolLimits() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 10);
  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(context);
  Configuration conf = new Configuration();
  // Cap the pool at 12 threads.
  conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
  containerLauncher.init(conf);
  containerLauncher.start();
  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
  containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
  // 10 launch events on 10 distinct hosts -> pool grows to 10 threads.
  for (int i = 0; i < 10; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // Four more events push the pool to the 12-thread limit, not to 14.
  containerLauncher.expectedCorePoolSize = 12;
  for (int i = 1; i <= 4; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host1" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 12);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // Let the remaining events drain; pool must stay at the limit.
  containerLauncher.finishEventHandling = true;
  waitForEvents(containerLauncher, 14);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Simulates a NodeManager that never answers the container-launch RPC: with
 * yarn.rpc.nm-command-timeout set to 3s and a single allowed map attempt,
 * the launch must time out, the attempt must stall in ASSIGNED, and the job
 * must fail with a SocketTimeoutException recorded in the diagnostics.
 */
@Test(timeout=15000)
public void testSlowNM() throws Exception {
  conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  // Command timeout much shorter than the (never-arriving) NM response.
  conf.setInt("yarn.rpc.nm-command-timeout", 3000);
  conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class.getName());
  YarnRPC rpc = YarnRPC.create(conf);
  String bindAddr = "localhost:0";
  InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
  NMTokenSecretManagerInNM tokenSecretManager = new NMTokenSecretManagerInNM();
  MasterKey masterKey = Records.newRecord(MasterKey.class);
  masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
  tokenSecretManager.setMasterKey(masterKey);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "token");
  // Dummy container manager standing in for the unresponsive NodeManager.
  server = rpc.getServer(ContainerManagementProtocol.class,
      new DummyContainerManager(), addr, conf, tokenSecretManager, 1);
  server.start();
  MRApp app = new MRAppWithSlowNM(tokenSecretManager);
  try {
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    // The attempt stalls in ASSIGNED because the launch RPC never completes.
    app.waitForInternalState((TaskAttemptImpl) attempt,
        TaskAttemptStateInternal.ASSIGNED);
    app.waitForState(job, JobState.FAILED);
    // The timeout must be surfaced in the attempt diagnostics.
    String diagnostics = attempt.getDiagnostics().toString();
    LOG.info("attempt.getDiagnostics: " + diagnostics);
    Assert.assertTrue(diagnostics.contains("Container launch failed for "
        + "container_0_0000_01_000000 : "));
    Assert.assertTrue(diagnostics.contains(
        "java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
  } finally {
    server.stop();
    app.stop();
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises thread-pool sizing of the container launcher: the pool adds one
 * thread per outstanding event, reuses idle threads once events have been
 * processed, and grows again only when demand exceeds the current size.
 */
@Test(timeout=5000)
public void testPoolSize() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(context);
  containerLauncher.init(new Configuration());
  containerLauncher.start();
  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
  // Fresh pool: no threads yet, core size at the initial default.
  Assert.assertEquals(0, threadPool.getPoolSize());
  Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,
      threadPool.getCorePoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
  // 10 launch events on 10 hosts -> 10 threads.
  for (int i = 0; i < 10; i++) {
    ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i);
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // Unblock the handlers and poll until all 10 events are fully processed.
  containerLauncher.finishEventHandling = true;
  int timeOut = 0;
  while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
    LOG.info("Waiting for number of events processed to become " + 10
        + ". It is now " + containerLauncher.numEventsProcessed.get()
        + ". Timeout is " + timeOut);
    Thread.sleep(1000);
  }
  Assert.assertEquals(10, containerLauncher.numEventsProcessed.get());
  containerLauncher.finishEventHandling = false;
  // 10 more events reuse the now-idle threads: pool stays at 10.
  for (int i = 0; i < 10; i++) {
    ContainerId containerId = ContainerId.newInstance(appAttemptId, i + 10);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i + 10);
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 20);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // A 21st outstanding event on a new host forces exactly one more thread.
  containerLauncher.expectedCorePoolSize = 11 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
  containerLauncher.finishEventHandling = false;
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 21);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21);
  containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
      containerId, "host11:1234", null,
      ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  waitForEvents(containerLauncher, 21);
  Assert.assertEquals(11, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.stop();
}

Class: org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Drives the allocator heartbeat loop with a controlled clock: the recorded
 * last-heartbeat time must track the clock, and a callback registered via
 * runOnNextHeartbeat must fire on the following beat.
 */
@Test
public void testHeartbeatHandler() throws Exception {
  LOG.info("Running testHeartbeatHandler");
  Configuration conf = new Configuration();
  // 1ms interval so heartbeats come essentially continuously.
  conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
  ControlledClock clock = new ControlledClock(new SystemClock());
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClock()).thenReturn(clock);
  when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1, 1));
  // Allocator with all RM interaction stubbed out: register and heartbeat
  // are no-ops and the scheduler proxy is a mock.
  RMContainerAllocator allocator = new RMContainerAllocator(
      mock(ClientService.class), appContext, new NoopAMPreemptionPolicy()) {
    @Override
    protected void register(){
    }
    @Override
    protected ApplicationMasterProtocol createSchedulerProxy(){
      return mock(ApplicationMasterProtocol.class);
    }
    @Override
    protected synchronized void heartbeat() throws Exception {
    }
  };
  allocator.init(conf);
  allocator.start();
  clock.setTime(5);
  int timeToWaitMs = 5000;
  // Poll until the heartbeat thread observes the new clock value.
  while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
    Thread.sleep(10);
    timeToWaitMs -= 10;
  }
  Assert.assertEquals(5, allocator.getLastHeartbeatTime());
  clock.setTime(7);
  timeToWaitMs = 5000;
  while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
    Thread.sleep(10);
    timeToWaitMs -= 10;
  }
  Assert.assertEquals(7, allocator.getLastHeartbeatTime());
  // A callback scheduled for the next heartbeat must run exactly then.
  final AtomicBoolean callbackCalled = new AtomicBoolean(false);
  allocator.runOnNextHeartbeat(new Runnable(){
    @Override
    public void run(){
      callbackCalled.set(true);
    }
  });
  clock.setTime(8);
  timeToWaitMs = 5000;
  while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
    Thread.sleep(10);
    timeToWaitMs -= 10;
  }
  Assert.assertEquals(8, allocator.getLastHeartbeatTime());
  Assert.assertTrue(callbackCalled.get());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies AM-side node blacklisting: after one failure each on h1 and h2
 * (failure threshold = 1), both nodes are blacklisted and all three requests
 * are eventually served by the only clean node, h3.
 */
@Test
public void testBlackListedNodes() throws Exception {
  LOG.info("Running testBlackListedNodes");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  // A single failure per node is enough to blacklist it.
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // Disable the "ignore blacklisting above N% blacklisted" escape hatch.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application and launch the AM.
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
          0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  // One container request per host.
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] {"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[] {"h2"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[] {"h3"});
  allocator.sendRequest(event3);
  // First schedule() only sends the asks; nothing is assigned yet.
  List assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Report one failure each on h1 and h2 so both get blacklisted.
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
  allocator.sendFailure(f2);
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // The two failed hosts must have been sent as blacklist additions.
  assertBlacklistAdditionsAndRemovals(2, 0, rm);
  nodeManager1.nodeHeartbeat(false);
  nodeManager2.nodeHeartbeat(false);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Only the clean node h3 may satisfy the requests.
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  // Fixed: assertEquals instead of assertTrue(size() == 3) so a failure
  // reports the actual size instead of just "false".
  Assert.assertEquals("No of assignments must be 3", 3, assigned.size());
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    // Fixed: assertEquals instead of assertTrue("h3".equals(...)) so a
    // failure reports which host the container actually landed on.
    Assert.assertEquals("Assigned container host not correct", "h3",
        assig.getContainer().getNodeId().getHost());
  }
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks locality-aware map scheduling: the two requests targeting h1 must be
 * assigned host-locally on h1, while the request targeting h2 (which never
 * heartbeats) is satisfied off-host on h3.
 */
@Test
public void testMapNodeLocality() throws Exception {
  LOG.info("Running testMapNodeLocality");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
          0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // h1 can hold two 1GB containers, h3 only one; h2 is registered but
  // never heartbeats, so its capacity is never visible to the scheduler.
  MockNM nodeManager1 = rm.registerNode("h1:1234", 3072);
  rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 1536);
  dispatcher.await();
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] {"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[] {"h1"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[] {"h2"});
  allocator.sendRequest(event3);
  // First schedule() only registers the asks; nothing is assigned yet.
  List assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Heartbeats from h3 and h1 make their capacity available.
  nodeManager3.nodeHeartbeat(true);
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  checkAssignments(new ContainerRequestEvent[] {event1, event2, event3},
      assigned, false);
  // The h2 request must have landed on h3; remove it from the list so the
  // remaining two can be checked for host locality. The immediate break
  // keeps the for-each safe after the remove.
  for (TaskAttemptContainerAssignedEvent event : assigned) {
    if (event.getTaskAttemptID().equals(event3.getAttemptID())) {
      assigned.remove(event);
      Assert.assertTrue(event.getContainer().getNodeId().getHost().equals("h3"));
      break;
    }
  }
  checkAssignments(new ContainerRequestEvent[] {event1, event2}, assigned, true);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that node health transitions reach the job as JobUpdatedNodeEvents
 * and that a task attempt running on a node that turns unhealthy receives a
 * TaskAttemptKillEvent.
 */
@Test
public void testUpdatedNodes() throws Exception {
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nm1 = rm.registerNode("h1:1234", 10240);
  MockNM nm2 = rm.registerNode("h2:1234", 10240);
  dispatcher.await();
  // One request targeting h1; wire the mock job so the allocator can map the
  // attempt back to the node it runs on.
  ContainerRequestEvent event = createReq(jobId, 1, 1024, new String[] {"h1"});
  allocator.sendRequest(event);
  TaskAttemptId attemptId = event.getAttemptID();
  TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
  when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
  Task mockTask = mock(Task.class);
  when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
  when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
  List assigned = allocator.schedule();
  dispatcher.await();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  // First update after registration reports all three nodes (amNM, h1, h2).
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(3,
      allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  allocator.getJobUpdatedNodeEvents().clear();
  // The request is now satisfied on h1; no new node updates, no kills.
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(1, assigned.size());
  Assert.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId());
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
  // Both nodes turn unhealthy: the job gets the update and the attempt
  // running on h1 is told to die.
  nm1.nodeHeartbeat(false);
  nm2.nodeHeartbeat(false);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
  Assert.assertEquals(2,
      allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  Assert.assertEquals(attemptId,
      allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
  allocator.getJobUpdatedNodeEvents().clear();
  allocator.getTaskAttemptKillEvents().clear();
  // No further transitions -> no further events.
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Work-preserving RM restart: when the restarted RM answers an allocate with
 * a RESYNC command, the allocator must re-register and re-send all
 * outstanding asks, releases and blacklist additions so no scheduling state
 * is lost across the restart.
 */
@Test
public void testRMContainerAllocatorResendsRequestsOnRMRestart() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  // Shared state store lets rm2 recover what rm1 persisted.
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  // --- Phase 1: run against the first RM. ---
  MyResourceManager rm1 = new MyResourceManager(conf, memStore);
  rm1.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm1.getRMContext().getDispatcher();
  RMApp app = rm1.submitApp(1024);
  dispatcher.await();
  MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm1.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
          0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);
  // Two requests plus a failure that blacklists h2.
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] {"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 2048, new String[] {"h1", "h2"});
  allocator.sendRequest(event2);
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
  allocator.sendFailure(f1);
  List assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  // node/rack/any asks for the new requests; h2 sent as blacklist addition.
  assertAsksAndReleases(3, 0, rm1);
  assertBlacklistAdditionsAndRemovals(1, 0, rm1);
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  // Both requests are now satisfied on h1.
  assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 2", 2, assignedContainers.size());
  assertAsksAndReleases(0, 0, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);
  assignedContainers = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  assertAsksAndReleases(3, 0, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);
  // Outstanding state at restart time: one new request, one deallocation.
  ContainerRequestEvent event3 = createReq(jobId, 3, 1000, new String[] {"h1"});
  allocator.sendRequest(event3);
  ContainerAllocatorEvent deallocate1 = createDeallocateEvent(jobId, 1, false);
  allocator.sendDeallocate(deallocate1);
  assignedContainers = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  assertAsksAndReleases(3, 1, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);
  // --- Phase 2: restart; bring up rm2 from the same store. ---
  MyResourceManager rm2 = new MyResourceManager(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  allocator.updateSchedulerProxy(rm2);
  dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
  // The old NM identity is told to resync; re-register it fresh against rm2.
  NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());
  nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  // More changes while the allocator is still unaware of the restart.
  ContainerAllocatorEvent deallocate2 = createDeallocateEvent(jobId, 2, false);
  allocator.sendDeallocate(deallocate2);
  ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
  allocator.sendFailure(f2);
  ContainerRequestEvent event4 = createReq(jobId, 4, 2000, new String[] {"h1", "h2"});
  allocator.sendRequest(event4);
  // This allocate hits rm2 and must come back with a RESYNC command.
  allocator.schedule();
  dispatcher.await();
  Assert.assertTrue("Last allocate response is not RESYNC",
      allocator.isResyncCommand());
  ContainerRequestEvent event5 =
      createReq(jobId, 5, 3000, new String[] {"h1", "h2", "h3"});
  allocator.sendRequest(event5);
  // After re-registering, the allocator re-sends everything outstanding:
  // asks, releases and blacklisted nodes.
  assignedContainers = allocator.schedule();
  dispatcher.await();
  assertAsksAndReleases(3, 2, rm2);
  assertBlacklistAdditionsAndRemovals(2, 0, rm2);
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("Number of container should be 3", 3,
      assignedContainers.size());
  for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
    Assert.assertTrue("Assigned count not correct",
        "h1".equals(assig.getContainer().getNodeId().getHost()));
  }
  rm1.stop();
  rm2.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies map/reduce scheduling: of the three requests sent, only the two
 * map requests (event1, event3) are assigned, and no assignment may land on
 * the undersized 1GB node h1.
 */
@Test
public void testMapReduceScheduling() throws Exception {
  LOG.info("Running testMapReduceScheduling");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
          0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // h1 is too small (1GB) for any of the 2-3GB requests below.
  MockNM nodeManager1 = rm.registerNode("h1:1234", 1024);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  // Trailing boolean flags on createReq: presumably
  // (earlier-failed-attempt, is-reduce) -- confirm against the createReq
  // helper defined elsewhere in this class.
  ContainerRequestEvent event1 =
      createReq(jobId, 1, 2048, new String[] {"h1", "h2"}, true, false);
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 =
      createReq(jobId, 2, 3000, new String[] {"h1"}, false, true);
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 =
      createReq(jobId, 3, 2048, new String[] {"h3"}, false, false);
  allocator.sendRequest(event3);
  // First schedule() only registers the asks; nothing is assigned yet.
  List assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  // Only event1 and event3 get containers; event2 is not scheduled here.
  checkAssignments(new ContainerRequestEvent[] {event1, event3}, assigned, false);
  // NOTE(review): the message reads like a copy-paste leftover; the check
  // itself is that nothing was assigned to the undersized host h1.
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertFalse("Assigned count not correct",
        "h1".equals(assig.getContainer().getNodeId().getHost()));
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * With YARN's HTTP policy set to HTTPS_ONLY in the submitted configuration,
 * the MR AM web app must still serve plain HTTP and must not expose an
 * HTTPS endpoint (connecting via https:// must fail with an SSLException).
 */
@Test
public void testMRWebAppSSLDisabled() throws Exception {
  MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true) {
    @Override
    protected ClientService createClientService(AppContext context) {
      return new MRClientService(context);
    }
  };
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, Policy.HTTPS_ONLY.name());
  Job job = app.submit(conf);

  String hostPort = NetUtils.getHostPortString(
      ((MRClientService) app.getClientService()).getWebApp().getListenerAddress());
  // Plain HTTP must work and serve the application page.
  URL httpUrl = new URL("http://" + hostPort);
  HttpURLConnection conn = (HttpURLConnection) httpUrl.openConnection();
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  // Fixed: close the response stream when done (leaked in the original).
  InputStream in = conn.getInputStream();
  try {
    IOUtils.copyBytes(in, out, 1024);
  } finally {
    in.close();
  }
  Assert.assertTrue(out.toString().contains("MapReduce Application"));

  // HTTPS against the same port must fail during the SSL handshake.
  URL httpsUrl = new URL("https://" + hostPort);
  try {
    HttpURLConnection httpsConn = (HttpURLConnection) httpsUrl.openConnection();
    httpsConn.getInputStream();
    Assert.fail("https:// is not accessible, expected to fail");
  } catch (Exception e) {
    Assert.assertTrue(e instanceof SSLException);
  }

  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
}

Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requesting the API root with an unsupported Accept type (text/plain) must
 * fail with HTTP 500 and produce no response body.
 */
@Test
public void testInvalidAccept() throws JSONException, Exception {
  WebResource root = resource();
  String body = "";
  try {
    body = root.path("ws").path("v1").path("mapreduce")
        .accept(MediaType.TEXT_PLAIN)
        .get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR, resp.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A request to an unknown top-level path ("invalid") must come back as
 * HTTP 404 with an empty response body.
 */
@Test
public void testInvalidUri2() throws JSONException, Exception {
  WebResource root = resource();
  String body = "";
  try {
    body = root.path("ws").path("v1").path("invalid")
        .accept(MediaType.APPLICATION_JSON)
        .get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A request to a bogus sub-path under the API root must come back as
 * HTTP 404 with an empty response body.
 */
@Test
public void testInvalidUri() throws JSONException, Exception {
  WebResource root = resource();
  String body = "";
  try {
    body = root.path("ws").path("v1").path("mapreduce").path("bogus")
        .accept(MediaType.APPLICATION_JSON)
        .get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A malformed job id ("job_foo") requested as XML must produce a 404 whose
 * XML RemoteException body carries the expected message, exception type and
 * class name (validated by verifyJobIdInvalid).
 */
@Test
public void testJobIdInvalidXML() throws JSONException, Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
        .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch ( UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String msg = response.getEntity(String.class);
    System.out.println(msg);
    // Parse the XML error body and pull the RemoteException fields out.
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(msg));
    Document dom = db.parse(is);
    NodeList nodes = dom.getElementsByTagName("RemoteException");
    Element element = (Element) nodes.item(0);
    String message = WebServicesTestUtils.getXmlString(element, "message");
    String type = WebServicesTestUtils.getXmlString(element, "exception");
    String classname = WebServicesTestUtils.getXmlString(element, "javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a malformed job id ("job_foo") as JSON and verifies the 404
 * RemoteException payload via verifyJobIdInvalid().
 */
@Test
public void testJobIdInvalid() throws JSONException, Exception {
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("mapreduce").path("jobs")
        .path("job_foo").accept(MediaType.APPLICATION_JSON)
        .get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject entity = resp.getEntity(JSONObject.class);
    JSONObject exception = entity.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    verifyJobIdInvalid(exception.getString("message"),
        exception.getString("exception"),
        exception.getString("javaClassName"));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a malformed job id ("job_foo") with no explicit Accept
 * header; the default response type should be JSON and the 404
 * RemoteException payload is verified via verifyJobIdInvalid().
 */
@Test
public void testJobIdInvalidDefault() throws JSONException, Exception {
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("mapreduce").path("jobs")
        .path("job_foo").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject entity = resp.getEntity(JSONObject.class);
    JSONObject exception = entity.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    verifyJobIdInvalid(exception.getString("message"),
        exception.getString("exception"),
        exception.getString("javaClassName"));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a well-formed but unknown job id ("job_0_1234") and verifies
 * the 404 RemoteException reports the job as not found.
 */
@Test
public void testJobIdNonExist() throws JSONException, Exception {
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("mapreduce").path("jobs")
        .path("job_0_1234").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject entity = resp.getEntity(JSONObject.class);
    JSONObject exception = entity.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: job, job_0_1234, is not found",
        exception.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", exception.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException",
        exception.getString("javaClassName"));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a job id that does not even match the "job_*" shape
 * ("bogusfoo") and verifies the 404 RemoteException calls out the
 * improperly formed JobId string.
 */
@Test
public void testJobIdInvalidBogus() throws JSONException, Exception {
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("mapreduce").path("jobs")
        .path("bogusfoo").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject entity = resp.getEntity(JSONObject.class);
    JSONObject exception = entity.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: JobId string : bogusfoo is not properly formed",
        exception.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", exception.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException",
        exception.getString("javaClassName"));
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, requests a task id with a bad type letter
 * ("task_0_0000_d_000000") and verifies the 404 RemoteException calls
 * out the bad TaskType identifier.
 */
@Test
public void testTaskIdInvalid() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_d_000000";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
              + "task_0_0000_d_000000 is not properly formed.",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, requests a task id that matches no recognizable
 * shape ("bogustaskid") and verifies the 404 RemoteException reports an
 * improperly formed TaskId string.
 */
@Test
public void testTaskIdBogus() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "bogustaskid";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "bogustaskid is not properly formed",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, queries the task list with an invalid type
 * parameter ("reduce" instead of the expected single letter) and
 * verifies a 400 BAD_REQUEST whose RemoteException says the tasktype
 * must be m or r.
 */
@Test
public void testTasksQueryInvalid() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tasktype = "reduce";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").queryParam("type", tasktype)
          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: tasktype must be either m or r",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          exception.getString("javaClassName"));
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, requests a well-formed but nonexistent task id
 * ("task_0_0000_m_000000") and verifies the 404 RemoteException reports
 * the task as not found.
 */
@Test
public void testTaskIdNonExist() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m_000000";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: task not found with id task_0_0000_m_000000",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, requests a truncated task id ("task_0_0000_m",
 * missing the attempt-number segment) and verifies the 404
 * RemoteException reports an improperly formed TaskId string.
 */
@Test
public void testTaskIdInvalid3() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0_0000_m is not properly formed",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For every known job, requests a task id missing one numeric segment
 * ("task_0_m_000000") and verifies the 404 RemoteException reports an
 * improperly formed TaskId string.
 */
@Test
public void testTaskIdInvalid2() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_m_000000";
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException expected) {
      ClientResponse resp = expected.getResponse();
      assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject entity = resp.getEntity(JSONObject.class);
      JSONObject exception = entity.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0_m_000000 is not properly formed",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}

Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAppController

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test method 'singleTaskCounter'. Should set SingleCounterPage class
 * for rendering and populate the counter group/name view properties.
 */
@Test
public void testGetSingleTaskCounter() throws IOException {
  appController.singleTaskCounter();
  // Rendering class must be switched to the single-counter page.
  assertEquals(SingleCounterPage.class, appController.getClazz());
  // Both counter identifiers must have been stored as view properties.
  assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
  assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
}

Class: org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * test some methods of CompletedTaskAttempt: values fed through a
 * mocked TaskAttemptInfo must come back unchanged from the wrapper.
 */
@Test(timeout=5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo info = mock(TaskAttemptInfo.class);
  when(info.getRackname()).thenReturn("Rackname");
  when(info.getShuffleFinishTime()).thenReturn(11L);
  when(info.getSortFinishTime()).thenReturn(12L);
  when(info.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID attemptId = new TaskAttemptID(taskId, 0);
  when(info.getAttemptId()).thenReturn(attemptId);
  CompletedTaskAttempt attempt = new CompletedTaskAttempt(null, info);
  assertEquals("Rackname", attempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, attempt.getPhase());
  assertTrue(attempt.isFinished());
  assertEquals(11L, attempt.getShuffleFinishTime());
  assertEquals(12L, attempt.getSortFinishTime());
  assertEquals(10, attempt.getShufflePort());
}

Class: org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * History dirs should be created once the file system leaves safe mode
 * before the timeout: enter safe mode, let a background thread leave
 * safe mode after 500ms, then attempt the dir creation with a real
 * clock. NOTE(review): an assertion failure inside the spawned thread
 * will not fail the test on its own — it only kills that thread.
 */
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
    throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  new Thread() {
    @Override
    public void run() {
      try {
        Thread.sleep(500);
        dfsCluster.getFileSystem().setSafeMode(
            HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        // BUG FIX: after SAFEMODE_LEAVE the cluster must NOT be in
        // safe mode; the original asserted the opposite
        // (assertTrue(isInSafeMode())).
        Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}

UtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier 
/**
 * When the file system never leaves safe mode before the (controlled)
 * clock passes the timeout, creating the history dirs must fail with a
 * YarnRuntimeException.
 */
@Test(expected=YarnRuntimeException.class)
public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout()
    throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  final ControlledClock clock = new ControlledClock(new SystemClock());
  clock.setTime(1);
  // Advance the clock past the timeout while the FS stays in safe mode.
  new Thread() {
    @Override
    public void run() {
      try {
        Thread.sleep(500);
        clock.setTime(3000);
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
}

Class: org.apache.hadoop.mapreduce.v2.hs.TestHistoryServerFileSystemStateStoreService

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Recovery after a failed token update: a spied local FileSystem makes
 * rename() of any temporary "update*" file throw the injected
 * IOException, so updateToken() fails after the new token data is
 * written but before the atomic rename. The store is then closed and a
 * fresh store must still recover token1 — with the NEW renew date
 * (975318642L), i.e. the written update content wins even though
 * updateToken() surfaced the error to the caller.
 */
@Test public void testUpdatedTokenRecovery() throws IOException { IOException intentionalErr=new IOException("intentional error"); FileSystem fs=FileSystem.getLocal(conf); final FileSystem spyfs=spy(fs); ArgumentMatcher updateTmpMatcher=new ArgumentMatcher(){ @Override public boolean matches( Object argument){ if (argument instanceof Path) { return ((Path)argument).getName().startsWith("update"); } return false; } } ; doThrow(intentionalErr).when(spyfs).rename(argThat(updateTmpMatcher),isA(Path.class)); conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,testDir.getAbsoluteFile().toURI().toString()); HistoryServerStateStoreService store=new HistoryServerFileSystemStateStoreService(){ @Override FileSystem createFileSystem() throws IOException { return spyfs; } } ; store.init(conf); store.start(); final MRDelegationTokenIdentifier token1=new MRDelegationTokenIdentifier(new Text("tokenOwner1"),new Text("tokenRenewer1"),new Text("tokenUser1")); token1.setSequenceNumber(1); final Long tokenDate1=1L; store.storeToken(token1,tokenDate1); final Long newTokenDate1=975318642L; try { store.updateToken(token1,newTokenDate1); fail("intentional error not thrown"); } catch ( IOException e) { assertEquals(intentionalErr,e); } store.close(); store=createAndStartStore(); HistoryServerState state=store.loadState(); assertEquals("incorrect loaded token count",1,state.tokenState.size()); assertTrue("missing token 1",state.tokenState.containsKey(token1)); assertEquals("incorrect token 1 date",newTokenDate1,state.tokenState.get(token1)); store.close(); }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJHSDelegationTokenSecretManager

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end recovery of the JHS delegation token secret manager:
 * stores two tokens, rebuilds the manager from the persisted state, and
 * checks master keys, token presence, renew dates, password
 * verification and sequence-number restoration (token3 must get
 * token2's sequence number + 1). It then exercises cancellation,
 * including an owner expressed as a Kerberos principal
 * ("tokenOwner/localhost@LOCALHOST") resolved via KerberosName rules,
 * and a second recovery round that must drop the cancelled token1 while
 * keeping token2/token3 verifiable.
 * NOTE(review): if the unauthorized cancelToken() does NOT throw
 * AccessControlException, the catch-block assertion is silently
 * skipped — consider adding a fail() after the call.
 */
@Test public void testRecovery() throws IOException { Configuration conf=new Configuration(); HistoryServerStateStoreService store=new HistoryServerMemStateStoreService(); store.init(conf); store.start(); JHSDelegationTokenSecretManagerForTest mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.startThreads(); MRDelegationTokenIdentifier tokenId1=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token1=new Token(tokenId1,mgr); MRDelegationTokenIdentifier tokenId2=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token2=new Token(tokenId2,mgr); DelegationKey[] keys=mgr.getAllKeys(); long tokenRenewDate1=mgr.getAllTokens().get(tokenId1).getRenewDate(); long tokenRenewDate2=mgr.getAllTokens().get(tokenId2).getRenewDate(); mgr.stopThreads(); mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.recover(store.loadState()); List recoveredKeys=Arrays.asList(mgr.getAllKeys()); for ( DelegationKey key : keys) { assertTrue("key missing after recovery",recoveredKeys.contains(key)); } assertTrue("token1 missing",mgr.getAllTokens().containsKey(tokenId1)); assertEquals("token1 renew date",tokenRenewDate1,mgr.getAllTokens().get(tokenId1).getRenewDate()); assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2)); assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate()); mgr.startThreads(); mgr.verifyToken(tokenId1,token1.getPassword()); mgr.verifyToken(tokenId2,token2.getPassword()); MRDelegationTokenIdentifier tokenId3=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token3=new Token(tokenId3,mgr); assertEquals("sequence number restore",tokenId2.getSequenceNumber() + 1,tokenId3.getSequenceNumber()); mgr.cancelToken(token1,"tokenOwner"); MRDelegationTokenIdentifier tokenIdFull=new MRDelegationTokenIdentifier(new 
Text("tokenOwner/localhost@LOCALHOST"),new Text("tokenRenewer"),new Text("tokenUser")); KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]"); Token tokenFull=new Token(tokenIdFull,mgr); try { mgr.cancelToken(tokenFull,"tokenOwner"); } catch ( AccessControlException ace) { assertTrue(ace.getMessage().contains("is not authorized to cancel the token")); } mgr.cancelToken(tokenFull,tokenIdFull.getOwner().toString()); long tokenRenewDate3=mgr.getAllTokens().get(tokenId3).getRenewDate(); mgr.stopThreads(); mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.recover(store.loadState()); assertFalse("token1 should be missing",mgr.getAllTokens().containsKey(tokenId1)); assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2)); assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate()); assertTrue("token3 missing",mgr.getAllTokens().containsKey(tokenId3)); assertEquals("token3 renew date",tokenRenewDate3,mgr.getAllTokens().get(tokenId3).getRenewDate()); mgr.startThreads(); mgr.verifyToken(tokenId2,token2.getPassword()); mgr.verifyToken(tokenId3,token3.getPassword()); mgr.stopThreads(); }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEntities

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test of some methods of CompletedJob built from a recorded
 * history file: completion-event windows honor fromEventId/maxEvents
 * limits (12 total attempt events, 10 map events, windowed slices of
 * 10 and 7), event ids arrive in non-decreasing order, and basic job
 * metadata (name "Sleep job", queue "default", progress 1.0, a single
 * empty diagnostic, no ACLs) matches the recorded run.
 * NOTE(review): loadConfFile() is asserted null — presumably the conf
 * file path mocked into HistoryFileInfo is not loadable in this setup;
 * confirm against CompletedJob.loadConfFile().
 * @throws Exception
 */
@Test(timeout=30000) public void testGetTaskAttemptCompletionEvent() throws Exception { HistoryFileInfo info=mock(HistoryFileInfo.class); when(info.getConfFile()).thenReturn(fullConfPath); completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager); TaskCompletionEvent[] events=completedJob.getMapAttemptCompletionEvents(0,1000); assertEquals(10,completedJob.getMapAttemptCompletionEvents(0,10).length); int currentEventId=0; for ( TaskCompletionEvent taskAttemptCompletionEvent : events) { int eventId=taskAttemptCompletionEvent.getEventId(); assertTrue(eventId >= currentEventId); currentEventId=eventId; } assertNull(completedJob.loadConfFile()); assertEquals("Sleep job",completedJob.getName()); assertEquals("default",completedJob.getQueueName()); assertEquals(1.0,completedJob.getProgress(),0.001); assertEquals(12,completedJob.getTaskAttemptCompletionEvents(0,1000).length); assertEquals(10,completedJob.getTaskAttemptCompletionEvents(0,10).length); assertEquals(7,completedJob.getTaskAttemptCompletionEvents(5,10).length); assertEquals(1,completedJob.getDiagnostics().size()); assertEquals("",completedJob.getDiagnostics().get(0)); assertEquals(0,completedJob.getJobACLs().size()); }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a 2-map/1-reduce MRAppWithHistory job to completion, replays it
 * through a fresh JobHistory service, and verifies the parsed job:
 * service lifecycle states (STARTED then STOPPED), completed map/reduce
 * counts, submitting user name, task partitioning by type (3 tasks =
 * 2 maps + 1 reduce), per-task contents via verifyTask(), and the final
 * SUCCEEDED job state.
 */
@Test public void testHistoryEvents() throws Exception { Configuration conf=new Configuration(); MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString()); app.waitForState(job,JobState.SUCCEEDED); app.waitForState(Service.STATE.STOPPED); HistoryContext context=new JobHistory(); ((JobHistory)context).init(conf); ((JobHistory)context).start(); Assert.assertTrue(context.getStartTime() > 0); Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STARTED); Job parsedJob=context.getJob(jobId); ((JobHistory)context).stop(); Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STOPPED); Assert.assertEquals("CompletedMaps not correct",2,parsedJob.getCompletedMaps()); Assert.assertEquals(System.getProperty("user.name"),parsedJob.getUserName()); Map tasks=parsedJob.getTasks(); Assert.assertEquals("No of tasks not correct",3,tasks.size()); for ( Task task : tasks.values()) { verifyTask(task); } Map maps=parsedJob.getTasks(TaskType.MAP); Assert.assertEquals("No of maps not correct",2,maps.size()); Map reduces=parsedJob.getTasks(TaskType.REDUCE); Assert.assertEquals("No of reduces not correct",1,reduces.size()); Assert.assertEquals("CompletedReduce not correct",1,parsedJob.getCompletedReduces()); Assert.assertEquals("Job state not currect",JobState.SUCCEEDED,parsedJob.getState()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a job through MRAppWithHistory with an explicit queue name and
 * checks the job parsed back from history reports that same queue.
 */
@Test
public void testAssignedQueue() throws Exception {
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
      true, "assignedQueue");
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  app.waitForState(Service.STATE.STOPPED);
  // Replay the finished job through a fresh JobHistory service.
  HistoryContext context = new JobHistory();
  ((JobHistory) context).init(conf);
  ((JobHistory) context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  Assert.assertEquals(((JobHistory) context).getServiceState(),
      Service.STATE.STARTED);
  Job parsedJob = context.getJob(jobId);
  ((JobHistory) context).stop();
  Assert.assertEquals(((JobHistory) context).getServiceState(),
      Service.STATE.STOPPED);
  Assert.assertEquals("QueueName not correct", "assignedQueue",
      parsedJob.getQueueName());
}

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * A killed job's diagnostics must survive into the history file: runs a
 * job that gets killed, parses its history file (inside the fileInfo
 * lock to avoid racing the move-to-done), asserts there was no parse
 * exception, and checks the recorded error info contains every original
 * diagnostic string plus the standard JobImpl.JOB_KILLED_DIAG marker.
 */
@Test(timeout=60000) public void testDiagnosticsForKilledJob() throws Exception { LOG.info("STARTING testDiagnosticsForKilledJob"); try { final Configuration conf=new Configuration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); MRApp app=new MRAppWithHistoryWithJobKilled(2,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); app.waitForState(job,JobState.KILLED); app.waitForState(Service.STATE.STOPPED); JobHistory jobHistory=new JobHistory(); jobHistory.init(conf); HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId); JobHistoryParser parser; JobInfo jobInfo; synchronized (fileInfo) { Path historyFilePath=fileInfo.getHistoryFile(); FSDataInputStream in=null; FileContext fc=null; try { fc=FileContext.getFileContext(conf); in=fc.open(fc.makeQualified(historyFilePath)); } catch ( IOException ioe) { LOG.info("Can not open history file: " + historyFilePath,ioe); throw (new Exception("Can not open History File")); } parser=new JobHistoryParser(in); jobInfo=parser.parse(); } Exception parseException=parser.getParseException(); assertNull("Caught an expected exception " + parseException,parseException); final List originalDiagnostics=job.getDiagnostics(); final String historyError=jobInfo.getErrorInfo(); assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty()); assertNotNull("No history error info for a failed job ",historyError); for ( String diagString : originalDiagnostics) { assertTrue(historyError.contains(diagString)); } assertTrue("No killed message in diagnostics",historyError.contains(JobImpl.JOB_KILLED_DIAG)); } finally { LOG.info("FINISHED testDiagnosticsForKilledJob"); } }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test PartialJob: a PartialJob built from index info alone
 * reports full progress, grants access, and answers null for every
 * detail that would require loading the full history file.
 */
@Test(timeout=1000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo indexInfo = new JobIndexInfo(0L, System.currentTimeMillis(),
      "user", "jobName", jobId, 3, 2, "JobStatus");
  PartialJob partial = new PartialJob(indexInfo, jobId);
  Assert.assertEquals(1.0f, partial.getProgress(), 0.001f);
  // Everything that needs the full history file is unavailable.
  assertNull(partial.getAllCounters());
  assertNull(partial.getTasks());
  assertNull(partial.getTasks(TaskType.MAP));
  assertNull(partial.getTask(new TaskIdPBImpl()));
  assertNull(partial.getTaskAttemptCompletionEvents(0, 100));
  assertNull(partial.getMapAttemptCompletionEvents(0, 100));
  assertTrue(partial.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(partial.getAMInfos());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * After a job fails because of a failed task, the history file must
 * still carry counters for every task: parses the history (inside the
 * fileInfo lock to avoid racing the background move-to-done), asserts
 * no parse exception, checks each CompletedTask's report has non-null
 * counters, and confirms the job-level error info contains all original
 * diagnostic strings.
 */
@Test(timeout=60000) public void testCountersForFailedTask() throws Exception { LOG.info("STARTING testCountersForFailedTask"); try { Configuration conf=new Configuration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); MRApp app=new MRAppWithHistoryWithFailedTask(2,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); app.waitForState(job,JobState.FAILED); app.waitForState(Service.STATE.STOPPED); JobHistory jobHistory=new JobHistory(); jobHistory.init(conf); HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId); JobHistoryParser parser; JobInfo jobInfo; synchronized (fileInfo) { Path historyFilePath=fileInfo.getHistoryFile(); FSDataInputStream in=null; FileContext fc=null; try { fc=FileContext.getFileContext(conf); in=fc.open(fc.makeQualified(historyFilePath)); } catch ( IOException ioe) { LOG.info("Can not open history file: " + historyFilePath,ioe); throw (new Exception("Can not open History File")); } parser=new JobHistoryParser(in); jobInfo=parser.parse(); } Exception parseException=parser.getParseException(); Assert.assertNull("Caught an expected exception " + parseException,parseException); for ( Map.Entry entry : jobInfo.getAllTasks().entrySet()) { TaskId yarnTaskID=TypeConverter.toYarn(entry.getKey()); CompletedTask ct=new CompletedTask(yarnTaskID,entry.getValue()); Assert.assertNotNull("completed task report has null counters",ct.getReport().getCounters()); } final List originalDiagnostics=job.getDiagnostics(); final String historyError=jobInfo.getErrorInfo(); assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty()); assertNotNull("No history error info for a failed job ",historyError); for ( String diagString : originalDiagnostics) { assertTrue(historyError.contains(diagString)); } } finally { LOG.info("FINISHED 
testCountersForFailedTask"); } }

IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test(timeout=50000) public void testScanningOldDirs() throws Exception { LOG.info("STARTING testScanningOldDirs"); try { Configuration conf=new Configuration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString()); app.waitForState(job,JobState.SUCCEEDED); app.waitForState(Service.STATE.STOPPED); HistoryFileManagerForTest hfm=new HistoryFileManagerForTest(); hfm.init(conf); HistoryFileInfo fileInfo=hfm.getFileInfo(jobId); Assert.assertNotNull("Unable to locate job history",fileInfo); hfm.deleteJobFromJobListCache(fileInfo); final int msecPerSleep=10; int msecToSleep=10 * 1000; while (fileInfo.isMovePending() && msecToSleep > 0) { Assert.assertTrue(!fileInfo.didMoveFail()); msecToSleep-=msecPerSleep; Thread.sleep(msecPerSleep); } Assert.assertTrue("Timeout waiting for history move",msecToSleep > 0); fileInfo=hfm.getFileInfo(jobId); hfm.stop(); Assert.assertNotNull("Unable to locate old job history",fileInfo); Assert.assertTrue("HistoryFileManager not shutdown properly",hfm.moveToDoneExecutor.isTerminated()); } finally { LOG.info("FINISHED testScanningOldDirs"); } }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * Test clean old history files. Files should be deleted after 1 week by * default. */ @Test(timeout=15000) public void testDeleteFileInfo() throws Exception { LOG.info("STARTING testDeleteFileInfo"); try { Configuration conf=new Configuration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); app.waitForState(job,JobState.SUCCEEDED); app.waitForState(Service.STATE.STOPPED); HistoryFileManager hfm=new HistoryFileManager(); hfm.init(conf); HistoryFileInfo fileInfo=hfm.getFileInfo(jobId); hfm.initExisting(); while (fileInfo.isMovePending()) { Thread.sleep(300); } Assert.assertNotNull(hfm.jobListCache.values()); hfm.clean(); Assert.assertFalse(fileInfo.isDeleted()); hfm.setMaxHistoryAge(-1); hfm.clean(); hfm.stop(); Assert.assertTrue("Thread pool shutdown",hfm.moveToDoneExecutor.isTerminated()); Assert.assertTrue("file should be deleted ",fileInfo.isDeleted()); } finally { LOG.info("FINISHED testDeleteFileInfo"); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Simple test some methods of JobHistory */ @Test(timeout=20000) public void testJobHistoryMethods() throws Exception { LOG.info("STARTING testJobHistoryMethods"); try { Configuration configuration=new Configuration(); configuration.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(configuration); MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true); app.submit(configuration); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString()); app.waitForState(job,JobState.SUCCEEDED); JobHistory jobHistory=new JobHistory(); jobHistory.init(configuration); Assert.assertEquals(1,jobHistory.getAllJobs().size()); Assert.assertEquals(1,jobHistory.getAllJobs(app.getAppID()).size()); JobsInfo jobsinfo=jobHistory.getPartialJobs(0L,10L,null,"default",0L,System.currentTimeMillis() + 1,0L,System.currentTimeMillis() + 1,JobState.SUCCEEDED); Assert.assertEquals(1,jobsinfo.getJobs().size()); Assert.assertNotNull(jobHistory.getApplicationAttemptId()); Assert.assertEquals("application_0_0000",jobHistory.getApplicationID().toString()); Assert.assertEquals("Job History Server",jobHistory.getApplicationName()); Assert.assertNull(jobHistory.getEventHandler()); Assert.assertNull(jobHistory.getClock()); Assert.assertNull(jobHistory.getClusterInfo()); } finally { LOG.info("FINISHED testJobHistoryMethods"); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testHistoryParsingForFailedAttempts() throws Exception { LOG.info("STARTING testHistoryParsingForFailedAttempts"); try { Configuration conf=new Configuration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); MRApp app=new MRAppWithHistoryWithFailedAttempt(2,1,true,this.getClass().getName(),true); app.submit(conf); Job job=app.getContext().getAllJobs().values().iterator().next(); JobId jobId=job.getID(); app.waitForState(job,JobState.SUCCEEDED); app.waitForState(Service.STATE.STOPPED); JobHistory jobHistory=new JobHistory(); jobHistory.init(conf); HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId); JobHistoryParser parser; JobInfo jobInfo; synchronized (fileInfo) { Path historyFilePath=fileInfo.getHistoryFile(); FSDataInputStream in=null; FileContext fc=null; try { fc=FileContext.getFileContext(conf); in=fc.open(fc.makeQualified(historyFilePath)); } catch ( IOException ioe) { LOG.info("Can not open history file: " + historyFilePath,ioe); throw (new Exception("Can not open History File")); } parser=new JobHistoryParser(in); jobInfo=parser.parse(); } Exception parseException=parser.getParseException(); Assert.assertNull("Caught an expected exception " + parseException,parseException); int noOffailedAttempts=0; Map allTasks=jobInfo.getAllTasks(); for ( Task task : job.getTasks().values()) { TaskInfo taskInfo=allTasks.get(TypeConverter.fromYarn(task.getID())); for ( TaskAttempt taskAttempt : task.getAttempts().values()) { TaskAttemptInfo taskAttemptInfo=taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID()))); Assert.assertEquals("rack-name is incorrect",taskAttemptInfo.getRackname(),RACK_NAME); if (taskAttemptInfo.getTaskStatus().equals("FAILED")) { noOffailedAttempts++; } } } Assert.assertEquals("No of Failed tasks doesn't match.",2,noOffailedAttempts); } finally { LOG.info("FINISHED testHistoryParsingForFailedAttempts"); } }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryServer

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end check of the history server's client protocol for a finished
 * job: task attempt report, task report, completion events and diagnostics.
 */
@Test(timeout = 50000)
public void testReports() throws Exception {
  Configuration config = new Configuration();
  config.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(config);
  MRApp app =
      new MRAppWithHistory(1, 1, true, this.getClass().getName(), true);
  app.submit(config);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  app.waitForState(job, JobState.SUCCEEDED);

  historyServer = new JobHistoryServer();
  historyServer.init(config);
  historyServer.start();

  // Locate the JobHistory instance among the server's child services.
  JobHistory jobHistory = null;
  for (Service service : historyServer.getServices()) {
    if (service instanceof JobHistory) {
      jobHistory = (JobHistory) service;
    }
  }
  // FIX: fail fast with a clear message instead of an NPE below if the
  // service is missing (also removes a stray empty statement).
  assertNotNull("JobHistory service not found", jobHistory);

  Map jobs = jobHistory.getAllJobs();
  assertEquals(1, jobs.size());
  assertEquals("job_0_0000", jobs.keySet().iterator().next().toString());

  Task task = job.getTasks().values().iterator().next();
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  HistoryClientService historyService = historyServer.getClientService();
  MRClientProtocol protocol = historyService.getClientHandler();

  // Task attempt report.
  GetTaskAttemptReportRequest gtarRequest =
      recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
  TaskAttemptId taId = attempt.getID();
  taId.setTaskId(task.getID());
  taId.getTaskId().setJobId(job.getID());
  gtarRequest.setTaskAttemptId(taId);
  GetTaskAttemptReportResponse response =
      protocol.getTaskAttemptReport(gtarRequest);
  assertEquals("container_0_0000_01_000000",
      response.getTaskAttemptReport().getContainerId().toString());
  assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
  assertNotNull(response.getTaskAttemptReport().getCounters()
      .getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
  assertEquals(taId.toString(),
      response.getTaskAttemptReport().getTaskAttemptId().toString());

  // Task report.
  GetTaskReportRequest request =
      recordFactory.newRecordInstance(GetTaskReportRequest.class);
  TaskId taskId = task.getID();
  taskId.setJobId(job.getID());
  request.setTaskId(taskId);
  GetTaskReportResponse reportResponse = protocol.getTaskReport(request);
  assertEquals("",
      reportResponse.getTaskReport().getDiagnosticsList().iterator().next());
  assertEquals(1.0f, reportResponse.getTaskReport().getProgress(), 0.01);
  assertEquals(taskId.toString(),
      reportResponse.getTaskReport().getTaskId().toString());
  assertEquals(TaskState.SUCCEEDED,
      reportResponse.getTaskReport().getTaskState());

  // Completion events: none retained for a completed job in history.
  GetTaskAttemptCompletionEventsRequest taskAttemptRequest = recordFactory
      .newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
  taskAttemptRequest.setJobId(job.getID());
  GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse =
      protocol.getTaskAttemptCompletionEvents(taskAttemptRequest);
  assertEquals(0,
      taskAttemptCompletionEventsResponse.getCompletionEventCount());

  // Diagnostics: exactly one (empty) entry.
  GetDiagnosticsRequest diagnosticRequest =
      recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
  diagnosticRequest.setTaskAttemptId(taId);
  GetDiagnosticsResponse diagnosticResponse =
      protocol.getDiagnostics(diagnosticRequest);
  assertEquals(1, diagnosticResponse.getDiagnosticsCount());
  assertEquals("", diagnosticResponse.getDiagnostics(0));
}

UtilityVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testLaunch() throws Exception { ExitUtil.disableSystemExit(); try { historyServer=JobHistoryServer.launchJobHistoryServer(new String[0]); } catch ( ExitUtil.ExitException e) { assertEquals(0,e.status); ExitUtil.resetFirstExitException(); fail(); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=50000) public void testStartStopServer() throws Exception { historyServer=new JobHistoryServer(); Configuration config=new Configuration(); historyServer.init(config); assertEquals(STATE.INITED,historyServer.getServiceState()); assertEquals(6,historyServer.getServices().size()); HistoryClientService historyService=historyServer.getClientService(); assertNotNull(historyServer.getClientService()); assertEquals(STATE.INITED,historyService.getServiceState()); historyServer.start(); assertEquals(STATE.STARTED,historyServer.getServiceState()); assertEquals(STATE.STARTED,historyService.getServiceState()); historyServer.stop(); assertEquals(STATE.STOPPED,historyServer.getServiceState()); assertNotNull(historyService.getClientHandler().getConnectAddress()); }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobIdHistoryFileInfoMap

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Trivial test case that verifies basic functionality of {@link JobIdHistoryFileInfoMap} */ @Test(timeout=2000) public void testWithSingleElement() throws InterruptedException { JobIdHistoryFileInfoMap mapWithSize=new JobIdHistoryFileInfoMap(); JobId jobId=MRBuilderUtils.newJobId(1,1,1); HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class); Mockito.when(fileInfo1.getJobId()).thenReturn(jobId); assertEquals("Incorrect return on putIfAbsent()",null,mapWithSize.putIfAbsent(jobId,fileInfo1)); assertEquals("Incorrect return on putIfAbsent()",fileInfo1,mapWithSize.putIfAbsent(jobId,fileInfo1)); assertEquals("Incorrect get()",fileInfo1,mapWithSize.get(jobId)); assertTrue("Incorrect size()",checkSize(mapWithSize,1)); NavigableSet set=mapWithSize.navigableKeySet(); assertEquals("Incorrect navigableKeySet()",1,set.size()); assertTrue("Incorrect navigableKeySet()",set.contains(jobId)); Collection values=mapWithSize.values(); assertEquals("Incorrect values()",1,values.size()); assertTrue("Incorrect values()",values.contains(fileInfo1)); }

Class: org.apache.hadoop.mapreduce.v2.hs.TestJobListCache

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test(timeout=1000) public void testEviction() throws InterruptedException { int maxSize=2; JobListCache cache=new JobListCache(maxSize,1000); JobId jobId1=MRBuilderUtils.newJobId(1,1,1); HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class); Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1); JobId jobId2=MRBuilderUtils.newJobId(2,2,2); HistoryFileInfo fileInfo2=Mockito.mock(HistoryFileInfo.class); Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2); JobId jobId3=MRBuilderUtils.newJobId(3,3,3); HistoryFileInfo fileInfo3=Mockito.mock(HistoryFileInfo.class); Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3); cache.addIfAbsent(fileInfo1); cache.addIfAbsent(fileInfo2); cache.addIfAbsent(fileInfo3); Collection values; for (int i=0; i < 9; i++) { values=cache.values(); if (values.size() > maxSize) { Thread.sleep(100); } else { assertFalse("fileInfo1 should have been evicted",values.contains(fileInfo1)); return; } } fail("JobListCache didn't delete the extra entry"); }

Class: org.apache.hadoop.mapreduce.v2.hs.server.TestHSAdminServer

BooleanVerifier NullVerifier HybridVerifier 
@Test public void testRefreshSuperUserGroups() throws Exception { UserGroupInformation ugi=mock(UserGroupInformation.class); UserGroupInformation superUser=mock(UserGroupInformation.class); when(ugi.getRealUser()).thenReturn(superUser); when(superUser.getShortUserName()).thenReturn("superuser"); when(superUser.getUserName()).thenReturn("superuser"); when(ugi.getGroupNames()).thenReturn(new String[]{"group3"}); when(ugi.getUserName()).thenReturn("regularUser"); conf.set("hadoop.proxyuser.superuser.groups","group1,group2"); conf.set("hadoop.proxyuser.superuser.hosts","127.0.0.1"); String[] args=new String[1]; args[0]="-refreshSuperUserGroupsConfiguration"; hsAdminClient.run(args); Throwable th=null; try { ProxyUsers.authorize(ugi,"127.0.0.1"); } catch ( Exception e) { th=e; } assertTrue(th instanceof AuthorizationException); conf.set("hadoop.proxyuser.superuser.groups","group1,group2,group3"); th=null; try { ProxyUsers.authorize(ugi,"127.0.0.1"); } catch ( Exception e) { th=e; } assertTrue(th instanceof AuthorizationException); hsAdminClient.run(args); th=null; try { ProxyUsers.authorize(ugi,"127.0.0.1"); } catch ( Exception e) { th=e; } assertNull("Unexpected exception thrown: " + th,th); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRefreshUserToGroupsMappings() throws Exception { String[] args=new String[]{"-refreshUserToGroupsMappings"}; Groups groups=Groups.getUserToGroupsMappingService(conf); String user=UserGroupInformation.getCurrentUser().getUserName(); System.out.println("first attempt:"); List g1=groups.getGroups(user); String[] str_groups=new String[g1.size()]; g1.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); System.out.println("second attempt, should be same:"); List g2=groups.getGroups(user); g2.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); for (int i=0; i < g2.size(); i++) { assertEquals("Should be same group ",g1.get(i),g2.get(i)); } hsAdminClient.run(args); System.out.println("third attempt(after refresh command), should be different:"); List g3=groups.getGroups(user); g3.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); for (int i=0; i < g3.size(); i++) { assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i))); } }

Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidAccept() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.path("ws").path("v1").path("history").accept(MediaType.TEXT_PLAIN).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.INTERNAL_SERVER_ERROR,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidUri2() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.path("ws").path("v1").path("invalid").accept(MediaType.APPLICATION_JSON).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidUri() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.path("ws").path("v1").path("history").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAcls

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobTaskAttemptIdAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTaskAttemptId(hsr,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTaskAttemptId(hsr,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJob(hsr,jobIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJob(hsr,jobIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobTaskAttemptsAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTaskAttempts(hsr,this.jobIdStr,this.taskIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTaskAttempts(hsr,this.jobIdStr,this.taskIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobTasksAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTasks(hsr,jobIdStr,"m"); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTasks(hsr,jobIdStr,"m"); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetSingleTaskCountersAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getSingleTaskCounters(hsr,this.jobIdStr,this.taskIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getSingleTaskCounters(hsr,this.jobIdStr,this.taskIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobTaskAttemptIdCountersAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTaskAttemptIdCounters(hsr,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTaskAttemptIdCounters(hsr,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobTaskAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTask(hsr,jobIdStr,this.taskIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTask(hsr,this.jobIdStr,this.taskIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobCountersAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobCounters(hsr,jobIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobCounters(hsr,jobIdStr); }

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testGetJobConfAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobConf(hsr,jobIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobConf(hsr,jobIdStr); }

Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobs

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testJobIdInvalidDefault() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); verifyJobIdInvalid(message,type,classname); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testJobIdInvalidXML() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_XML).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String msg=response.getEntity(String.class); System.out.println(msg); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(msg)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("RemoteException"); Element element=(Element)nodes.item(0); String message=WebServicesTestUtils.getXmlString(element,"message"); String type=WebServicesTestUtils.getXmlString(element,"exception"); String classname=WebServicesTestUtils.getXmlString(element,"javaClassName"); verifyJobIdInvalid(message,type,classname); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testJobIdNonExist() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_0_1234").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: job, job_0_1234, is not found",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJobCountersForKilledJob() throws Exception { WebResource r=resource(); appContext=new MockHistoryContext(0,1,1,1,true); injector=Guice.createInjector(new ServletModule(){ @Override protected void configureServlets(){ webApp=mock(HsWebApp.class); when(webApp.name()).thenReturn("hsmockwebapp"); bind(JAXBContextResolver.class); bind(HsWebServices.class); bind(GenericExceptionHandler.class); bind(WebApp.class).toInstance(webApp); bind(AppContext.class).toInstance(appContext); bind(HistoryContext.class).toInstance(appContext); bind(Configuration.class).toInstance(conf); serve("/*").with(GuiceContainer.class); } } ); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobCounters"); WebServicesTestUtils.checkStringMatch("id",MRApps.toString(id),info.getString("id")); assertTrue("Job shouldn't contain any counters",info.length() == 1); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testJobIdInvalidBogus() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("bogusfoo").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: JobId string : " + "bogusfoo is not properly formed",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testJobIdInvalid() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); verifyJobIdInvalid(message,type,classname); } }

Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); int size=jobsMap.size(); ArrayList finishTime=new ArrayList(size); for ( Map.Entry entry : jobsMap.entrySet()) { finishTime.add(entry.getValue().getReport().getFinishTime()); } Collections.sort(finishTime); assertTrue("Error we must have atleast 3 jobs",size >= 3); long midFinishTime=finishTime.get(size - 2); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("finishedTimeBegin",String.valueOf(40000)).queryParam("finishedTimeEnd",String.valueOf(midFinishTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject jobs=json.getJSONObject("jobs"); JSONArray arr=jobs.getJSONArray("job"); assertEquals("incorrect number of elements",size - 1,arr.length()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); int size=jobsMap.size(); ArrayList startTime=new ArrayList(size); for ( Map.Entry entry : jobsMap.entrySet()) { startTime.add(entry.getValue().getReport().getStartTime()); } Collections.sort(startTime); assertTrue("Error we must have atleast 3 jobs",size >= 3); long midStartTime=startTime.get(size - 2); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("startedTimeBegin",String.valueOf(40000)).queryParam("startedTimeEnd",String.valueOf(midStartTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject jobs=json.getJSONObject("jobs"); JSONArray arr=jobs.getJSONArray("job"); assertEquals("incorrect number of elements",size - 1,arr.length()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Query with a state no job is in: the "jobs" entry in the response must
 * be JSON null.
 */
@Test
public void testJobsQueryStateNone() throws JSONException, Exception {
  WebResource r = resource();
  // Start with every JobState and remove the states in use, leaving at
  // least one state that no job currently has. Typed list: the raw
  // ArrayList made get(0) an Object, not a JobState.
  ArrayList<JobState> jobStates =
      new ArrayList<JobState>(Arrays.asList(JobState.values()));
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    jobStates.remove(entry.getValue().getState());
  }
  assertTrue("No unused job states", jobStates.size() > 0);
  JobState notInUse = jobStates.get(0);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("state", notInUse.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}

Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesTasks

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A task id with an invalid task-type letter must yield 404 with a
 * well-formed RemoteException JSON body.
 */
@Test
public void testTaskIdInvalid() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_d_000000"; // "d" is not a valid task type
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
              + "task_0_0000_d_000000 is not properly formed.", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A well-formed task id that matches no task must yield 404 with a
 * well-formed RemoteException JSON body.
 */
@Test
public void testTaskIdNonExist() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m_000000"; // valid format, nonexistent task
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: task not found with id task_0_0000_m_000000",
          message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A truncated task id (missing the sequence number) must yield 404 with a
 * well-formed RemoteException JSON body.
 */
@Test
public void testTaskIdInvalid3() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m"; // missing trailing task number
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0_0000_m is not properly formed", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A task id missing one of its numeric components must yield 404 with a
 * well-formed RemoteException JSON body.
 */
@Test
public void testTaskIdInvalid2() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0000_m_000000"; // missing one id component
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0000_m_000000 is not properly formed", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A completely malformed task id must yield 404 with a well-formed
 * RemoteException JSON body.
 */
@Test
public void testTaskIdBogus() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "bogustaskid"; // not a task id at all
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "bogustaskid is not properly formed", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * An invalid task-type query parameter (anything but "m" or "r") must
 * yield 400 with a well-formed RemoteException JSON body.
 */
@Test
public void testTasksQueryInvalid() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map: the raw Map made keySet() elements Objects, not JobIds.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tasktype = "reduce"; // must be the single letter "m" or "r"
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").queryParam("type", tasktype)
          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: tasktype must be either m or r", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
    }
  }
}

Class: org.apache.hadoop.mapreduce.v2.jobhistory.TestFileNameIndexUtils

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Decoding a history file name in the old (pre-queue-name) format must
 * still yield correct index info, with a null queue name.
 */
@Test
public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
  final JobID oldJobId = JobID.forName(JOB_ID);
  final JobId jobId = TypeConverter.toYarn(oldJobId);
  final long expectedSubmitTime = Long.parseLong(SUBMIT_TIME);
  final long expectedFinishTime = Long.parseLong(FINISH_TIME);
  final int expectedNumMaps = Integer.parseInt(NUM_MAPS);
  final int expectedNumReduces = Integer.parseInt(NUM_REDUCES);
  final String historyFileName = String.format(OLD_JOB_HISTORY_FILE_FORMATTER,
      JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME, FINISH_TIME, NUM_MAPS,
      NUM_REDUCES, JOB_STATUS);
  final JobIndexInfo info = FileNameIndexUtils.getIndexInfo(historyFileName);
  Assert.assertEquals("Job id incorrect after decoding old history file",
      jobId, info.getJobId());
  Assert.assertEquals("Submit time incorrect after decoding old history file",
      expectedSubmitTime, info.getSubmitTime());
  Assert.assertEquals("User incorrect after decoding old history file",
      USER_NAME, info.getUser());
  Assert.assertEquals("Job name incorrect after decoding old history file",
      JOB_NAME, info.getJobName());
  Assert.assertEquals("Finish time incorrect after decoding old history file",
      expectedFinishTime, info.getFinishTime());
  Assert.assertEquals("Num maps incorrect after decoding old history file",
      expectedNumMaps, info.getNumMaps());
  Assert.assertEquals("Num reduces incorrect after decoding old history file",
      expectedNumReduces, info.getNumReduces());
  Assert.assertEquals("Job status incorrect after decoding old history file",
      JOB_STATUS, info.getJobStatus());
  // The old format carries no queue name, so none must be decoded.
  Assert.assertNull("Queue name incorrect after decoding old history file",
      info.getQueueName());
}

Class: org.apache.hadoop.mapreduce.v2.util.TestMRApps

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** getJobFile must embed the given user under the configured staging dir. */
@Test(timeout = 120000)
public void testGetJobFileWithUser() {
  final Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
  final String jobFile =
      MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
  assertNotNull("getJobFile results in null.", jobFile);
  assertEquals("jobFile with specified user is not as expected.",
      "/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml",
      jobFile);
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * With user-classpath precedence enabled, the job jars must come first on
 * CLASSPATH (right after PWD).
 */
@Test(timeout = 120000)
public void testSetClasspathWithUserPrecendence() {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
  // Typed map: the raw Map made env.get(...) an Object, which cannot be
  // assigned to the String below.
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  } catch (Exception e) {
    fail("Got exception while setting classpath");
  }
  String env_str = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          "job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
      env_str.startsWith(expectedClasspath));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With the job classloader enabled, job.jar must move off CLASSPATH and
 * onto APP_CLASSPATH instead.
 */
@Test(timeout = 120000)
public void testSetClasspathWithJobClassloader() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  // Typed map: the raw Map made env.get(...) an Object, which cannot be
  // assigned to the String locals below.
  Map<String, String> env = new HashMap<String, String>();
  MRApps.setClasspath(env, conf);
  String cp = env.get("CLASSPATH");
  String appCp = env.get("APP_CLASSPATH");
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
      + " classpath!",
      cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
      cp.contains("PWD"));
  String expectedAppClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          "job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
      + " classpath!", expectedAppClasspath, appCp);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * getSystemPropertiesToLog must return null for a blank property list and
 * only the explicitly listed properties otherwise.
 */
@Test
public void testLogSystemProperties() throws Exception {
  final Configuration conf = new Configuration();
  // A blank list of properties yields nothing to log.
  conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG, " ");
  String value = MRApps.getSystemPropertiesToLog(conf);
  assertNull(value);
  final String classpath = "java.class.path";
  final String os = "os.name";
  final String version = "java.version";
  // Only the two listed properties should appear in the result.
  conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,
      classpath + ", " + os);
  value = MRApps.getSystemPropertiesToLog(conf);
  assertNotNull(value);
  assertTrue(value.contains(classpath));
  assertTrue(value.contains(os));
  assertFalse(value.contains(version));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * setupDistributedCache must turn configured cache archives/files into
 * LocalResources with the configured sizes, timestamps, and types.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testSetupDistributedCache() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  URI mockUri = URI.create("mockfs://mock/");
  FileSystem mockFs =
      ((FilterFileSystem) FileSystem.get(mockUri, conf)).getRawFileSystem();
  URI archive = new URI("mockfs://mock/tmp/something.zip");
  Path archivePath = new Path(archive);
  URI file = new URI("mockfs://mock/tmp/something.txt#something");
  Path filePath = new Path(file);
  // Resolve to themselves so no real filesystem access happens.
  when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
  when(mockFs.resolvePath(filePath)).thenReturn(filePath);
  DistributedCache.addCacheArchive(archive, conf);
  conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
  conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
  conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
  DistributedCache.addCacheFile(file, conf);
  conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
  conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
  conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
  // Typed map: the raw Map made get(...) an Object, which cannot be
  // assigned to LocalResource below.
  Map<String, LocalResource> localResources =
      new HashMap<String, LocalResource>();
  MRApps.setupDistributedCache(conf, localResources);
  assertEquals(2, localResources.size());
  LocalResource lr = localResources.get("something.zip");
  assertNotNull(lr);
  assertEquals(10L, lr.getSize());
  assertEquals(10L, lr.getTimestamp());
  assertEquals(LocalResourceType.ARCHIVE, lr.getType());
  // The file entry is keyed by its fragment name.
  lr = localResources.get("something");
  assertNotNull(lr);
  assertEquals(11L, lr.getSize());
  assertEquals(11L, lr.getTimestamp());
  assertEquals(LocalResourceType.FILE, lr.getType());
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Without user-classpath precedence the job jars must appear on CLASSPATH
 * but not at its head.
 */
@Test(timeout = 120000)
public void testSetClasspathWithNoUserPrecendence() {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
  // Typed map: the raw Map made env.get(...) an Object, not a String.
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  } catch (Exception e) {
    fail("Got exception while setting classpath");
  }
  String env_str = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
      + " the classpath!", env_str.contains(expectedClasspath));
  assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
      env_str.startsWith(expectedClasspath));
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Classpath construction with a framework path configured: setClasspath
 * must fail fast when the framework name is missing from the application
 * classpath, and must order framework vs. job entries according to the
 * user-classpath-first setting.
 */
@Test(timeout = 3000000)
public void testSetClasspathWithFramework() throws IOException {
  final String FRAMEWORK_NAME = "some-framework-name";
  final String FRAMEWORK_PATH = "some-framework-path#" + FRAMEWORK_NAME;
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, FRAMEWORK_PATH);
  // Typed map instead of the raw HashMap, consistent with sibling tests.
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
    fail("Failed to catch framework path set without classpath change");
  } catch (IllegalArgumentException e) {
    assertTrue("Unexpected IllegalArgumentException",
        e.getMessage().contains("Could not locate MapReduce framework name '"
            + FRAMEWORK_NAME + "'"));
  }
  env.clear();
  final String FRAMEWORK_CLASSPATH = FRAMEWORK_NAME + "/*.jar";
  conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH, FRAMEWORK_CLASSPATH);
  MRApps.setClasspath(env, conf);
  final String stdClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  String expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          FRAMEWORK_CLASSPATH, stdClasspath));
  assertEquals("Incorrect classpath with framework and no user precedence",
      expectedClasspath, env.get("CLASSPATH"));
  env.clear();
  // With user precedence, the job entries move ahead of the framework.
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
  MRApps.setClasspath(env, conf);
  expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          stdClasspath, FRAMEWORK_CLASSPATH));
  assertEquals("Incorrect classpath with framework and user precedence",
      expectedClasspath, env.get("CLASSPATH"));
}

Class: org.apache.hadoop.metrics2.impl.TestMetricsCollectorImpl

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** A record name excluded by the filter must turn the builder into a no-op. */
@Test
public void recordBuilderShouldNoOpIfFiltered() {
  final SubsetConfiguration fc =
      new ConfigBuilder().add("p.exclude", "foo").subset("p");
  final MetricsCollectorImpl mb = new MetricsCollectorImpl();
  mb.setRecordFilter(newGlobFilter(fc));
  final MetricsRecordBuilderImpl rb = mb.addRecord("foo");
  // Tag and gauge additions on a filtered record must be silently dropped.
  rb.tag(info("foo", ""), "value").addGauge(info("g0", ""), 1);
  assertEquals("no tags", 0, rb.tags().size());
  assertEquals("no metrics", 0, rb.metrics().size());
  assertNull("null record", rb.getRecord());
  assertEquals("no records", 0, mb.getRecords().size());
}

Class: org.apache.hadoop.metrics2.impl.TestMetricsSourceAdapter

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Fetch metrics both via getMetrics() and via the JMX attribute cache and
 * check they agree before and after incrementing the counter.
 */
@Test
public void testGetMetricsAndJmx() throws Exception {
  TestSource source = new TestSource("test");
  MetricsSourceBuilder sb = MetricsAnnotations.newSourceBuilder(source);
  final MetricsSource s = sb.build();
  // Typed collections: the raw Iterable made iterator().next() an Object,
  // which cannot be assigned to MetricsRecordImpl below.
  List<MetricsTag> injectedTags = new ArrayList<MetricsTag>();
  MetricsSourceAdapter sa = new MetricsSourceAdapter("test", "test",
      "test desc", s, injectedTags, null, null, 1, false);
  MetricsCollectorImpl builder = new MetricsCollectorImpl();
  Iterable<MetricsRecordImpl> metricsRecords = sa.getMetrics(builder, true);
  MetricsRecordImpl metricsRecord = metricsRecords.iterator().next();
  assertEquals(0L,
      metricsRecord.metrics().iterator().next().value().longValue());
  // Give the JMX attribute cache a moment before reading it.
  Thread.sleep(100);
  assertEquals(0L, (Number) sa.getAttribute("C1"));
  source.incrementCnt();
  builder = new MetricsCollectorImpl();
  metricsRecords = sa.getMetrics(builder, true);
  metricsRecord = metricsRecords.iterator().next();
  assertTrue(metricsRecord.metrics().iterator().hasNext());
  Thread.sleep(100);
  assertEquals(1L, (Number) sa.getAttribute("C1"));
}

Class: org.apache.hadoop.metrics2.impl.TestMetricsSystemImpl

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Publish metrics concurrently from many threads and verify each thread's
 * value is collected exactly once, with nothing dropped.
 */
@Test
public void testMultiThreadedPublish() throws Exception {
  final int numThreads = 10;
  new ConfigBuilder().add("*.period", 80)
      .add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
          numThreads)
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  final CollectingSink sink = new CollectingSink(numThreads);
  ms.registerSink("collector", "Collector of values from all threads.", sink);
  final TestSource[] sources = new TestSource[numThreads];
  final Thread[] threads = new Thread[numThreads];
  final String[] results = new String[numThreads];
  final CyclicBarrier barrier1 = new CyclicBarrier(numThreads),
      barrier2 = new CyclicBarrier(numThreads);
  for (int i = 0; i < numThreads; i++) {
    sources[i] = ms.register("threadSource" + i,
        "A source of my threaded goodness.",
        new TestSource("threadSourceRec" + i));
    threads[i] = new Thread(new Runnable() {
      private boolean safeAwait(int mySource, CyclicBarrier barrier) {
        try {
          // BUGFIX: wait on the barrier passed in; the original always
          // awaited barrier1, so the barrier2 rendezvous never happened.
          barrier.await(2, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
          results[mySource] = "Interrupted";
          return false;
        } catch (BrokenBarrierException e) {
          results[mySource] = "Broken Barrier";
          return false;
        } catch (TimeoutException e) {
          results[mySource] = "Timed out on barrier";
          return false;
        }
        return true;
      }

      @Override
      public void run() {
        // Thread name doubles as this thread's slot index.
        int mySource = Integer.parseInt(Thread.currentThread().getName());
        if (sink.collected[mySource].get() != 0L) {
          results[mySource] = "Someone else collected my metric!";
          return;
        }
        if (!safeAwait(mySource, barrier1)) return;
        sources[mySource].g1.set(230);
        ms.publishMetricsNow();
        if (!safeAwait(mySource, barrier2)) return;
        if (sink.collected[mySource].get() != 230L) {
          results[mySource] = "Metric not collected!";
          return;
        }
        results[mySource] = "Passed";
      }
    }, "" + i);
  }
  for (Thread t : threads) t.start();
  for (Thread t : threads) t.join();
  assertEquals(0L, ms.droppedPubAll.value());
  // Typed Predicate: a raw one cannot @Override apply(String).
  assertTrue(StringUtils.join("\n", Arrays.asList(results)),
      Iterables.all(Arrays.asList(results), new Predicate<String>() {
        @Override
        public boolean apply(@Nullable String input) {
          return input.equalsIgnoreCase("Passed");
        }
      }));
  ms.stop();
  ms.shutdown();
}

InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier 
/** Re-registering an existing source name must install a new source. */
@Test
public void testRegisterDups() {
  final MetricsSystem ms = new MetricsSystemImpl();
  final TestSource ts1 = new TestSource("ts1");
  final TestSource ts2 = new TestSource("ts2");
  ms.register("ts1", "", ts1);
  final MetricsSource first = ms.getSource("ts1");
  assertNotNull(first);
  // Registering again under the same name must replace the adapter.
  ms.register("ts1", "", ts2);
  final MetricsSource second = ms.getSource("ts1");
  assertNotNull(second);
  assertNotSame(first, second);
  ms.shutdown();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A sink that hangs must not wedge the metrics system: its record is
 * dropped, the sink is interrupted on shutdown, and later records still
 * reach it.
 */
@Test
public void testHangingSink() {
  new ConfigBuilder().add("*.period", 8)
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.sink.hanging.retry.delay", "1")
      .add("test.sink.hanging.retry.backoff", "1.01")
      .add("test.sink.hanging.retry.count", "0")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  final TestSource s = ms.register("s3", "s3 desc", new TestSource("s3rec"));
  s.c1.incr();
  final HangingSink hanging = new HangingSink();
  ms.registerSink("hanging", "Hang the sink!", hanging);
  ms.publishMetricsNow();
  // The hung publish counts as one dropped record.
  assertEquals(1L, ms.droppedPubAll.value());
  assertFalse(hanging.getInterrupted());
  ms.stop();
  ms.shutdown();
  // Shutdown must interrupt the hung sink thread.
  assertTrue(hanging.getInterrupted());
  assertTrue("The sink didn't get called after its first hang "
      + "for subsequent records.", hanging.getGotCalledSecondTime());
}

Class: org.apache.hadoop.metrics2.impl.TestSinkQueue

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test concurrent consumer access, which is illegal: every second-consumer
 * operation must throw ConcurrentModificationException and leave the queue
 * contents intact.
 * @throws Exception
 */
@Test
public void testConcurrentConsumers() throws Exception {
  // Typed queue instead of the raw SinkQueue.
  final SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1);
  assertTrue("should enqueue", q.enqueue(2));
  assertEquals("queue back", 2, (int) q.back());
  assertTrue("should drop", !q.enqueue(3));
  shouldThrowCME(new Fun() {
    @Override
    public void run() {
      q.clear();
    }
  });
  shouldThrowCME(new Fun() {
    @Override
    public void run() throws Exception {
      q.consume(null);
    }
  });
  shouldThrowCME(new Fun() {
    @Override
    public void run() throws Exception {
      q.consumeAll(null);
    }
  });
  shouldThrowCME(new Fun() {
    @Override
    public void run() throws Exception {
      q.dequeue();
    }
  });
  // The failed operations must not have perturbed the queue.
  assertEquals("queue size", 2, q.size());
  assertEquals("queue front", 1, (int) q.front());
  assertEquals("queue back", 2, (int) q.back());
}

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test the consumer throwing exceptions: the exception must propagate to
 * the caller and the element must remain in the queue.
 * @throws Exception
 */
@Test
public void testConsumerException() throws Exception {
  // Typed queue/consumer: a raw Consumer cannot @Override consume(Integer).
  final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
  final RuntimeException ex = new RuntimeException("expected");
  q.enqueue(1);
  try {
    q.consume(new Consumer<Integer>() {
      @Override
      public void consume(Integer e) {
        throw ex;
      }
    });
  } catch (Exception expected) {
    assertSame("consumer exception", ex, expected);
  }
  // The failed consume must not have dequeued the element.
  assertEquals("queue size", 1, q.size());
  assertEquals("element", 1, (int) q.dequeue());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test common use case: enqueue/front/back/dequeue/consume cycles, ending
 * with an empty queue whose front and back are null.
 * @throws Exception
 */
@Test
public void testCommon() throws Exception {
  // Typed queue/consumer: a raw Consumer cannot @Override consume(Integer).
  final SinkQueue<Integer> q = new SinkQueue<Integer>(2);
  q.enqueue(1);
  assertEquals("queue front", 1, (int) q.front());
  assertEquals("queue back", 1, (int) q.back());
  assertEquals("element", 1, (int) q.dequeue());
  assertTrue("should enqueue", q.enqueue(2));
  q.consume(new Consumer<Integer>() {
    @Override
    public void consume(Integer e) {
      assertEquals("element", 2, (int) e);
    }
  });
  assertTrue("should enqueue", q.enqueue(3));
  assertEquals("element", 3, (int) q.dequeue());
  // Drained: size 0 and null sentinels at both ends.
  assertEquals("queue size", 0, q.size());
  assertEquals("queue front", null, q.front());
  assertEquals("queue back", null, q.back());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test nonblocking enqueue when queue is full: the new element is dropped
 * and existing state is unchanged.
 * @throws Exception
 */
@Test
public void testFull() throws Exception {
  // Typed queue/consumer: a raw Consumer cannot @Override consume(Integer).
  final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
  q.enqueue(1);
  // Capacity 1: the second enqueue must be rejected.
  assertTrue("should drop", !q.enqueue(2));
  assertEquals("element", 1, (int) q.dequeue());
  q.enqueue(3);
  q.consume(new Consumer<Integer>() {
    @Override
    public void consume(Integer e) {
      assertEquals("element", 3, (int) e);
    }
  });
  assertEquals("queue size", 0, q.size());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test consumers that take their time: while the consumer sleeps, a full
 * queue drops new elements and keeps the buffered ones.
 * @throws Exception
 */
@Test
public void testHangingConsumer() throws Exception {
  // Typed queue instead of the raw SinkQueue.
  SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1, 2);
  assertEquals("queue back", 2, (int) q.back());
  assertTrue("should drop", !q.enqueue(3));
  // The sleeping consumer has not dequeued anything yet.
  assertEquals("queue size", 2, q.size());
  assertEquals("queue head", 1, (int) q.front());
  assertEquals("queue back", 2, (int) q.back());
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the consumeAll method: every buffered element is delivered exactly
 * once, in FIFO order.
 * @throws Exception
 */
@Test
public void testConsumeAll() throws Exception {
  final int capacity = 64;
  // Typed queue/consumer: a raw Consumer cannot @Override consume(Integer).
  final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
  for (int i = 0; i < capacity; ++i) {
    assertTrue("should enqueue", q.enqueue(i));
  }
  // One past capacity must be rejected.
  assertTrue("should not enqueue", !q.enqueue(capacity));
  final Runnable trigger = mock(Runnable.class);
  q.consumeAll(new Consumer<Integer>() {
    private int expected = 0;

    @Override
    public void consume(Integer e) {
      assertEquals("element", expected++, (int) e);
      trigger.run();
    }
  });
  // Exactly one callback per enqueued element.
  verify(trigger, times(capacity)).run();
}

Class: org.apache.hadoop.metrics2.lib.TestMetricsRegistry

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Test various factory methods of MetricsRegistry. */
@Test
public void testNewMetrics() {
  final MetricsRegistry r = new MetricsRegistry("test");
  r.newCounter("c1", "c1 desc", 1);
  r.newCounter("c2", "c2 desc", 2L);
  r.newGauge("g1", "g1 desc", 3);
  r.newGauge("g2", "g2 desc", 4L);
  r.newStat("s1", "s1 desc", "ops", "time");
  assertEquals("num metrics in registry", 5, r.metrics().size());
  // Each factory must produce the matching mutable metric type.
  assertTrue("c1 found", r.get("c1") instanceof MutableCounterInt);
  assertTrue("c2 found", r.get("c2") instanceof MutableCounterLong);
  assertTrue("g1 found", r.get("g1") instanceof MutableGaugeInt);
  assertTrue("g2 found", r.get("g2") instanceof MutableGaugeLong);
  assertTrue("s1 found", r.get("s1") instanceof MutableStat);
  // Re-registering an existing name is an error.
  expectMetricsException("Metric name c1 already exists", new Runnable() {
    @Override
    public void run() {
      r.newCounter("c1", "test dup", 0);
    }
  });
}

Class: org.apache.hadoop.metrics2.sink.TestFileSink

TestCleaner BranchVerifier BooleanVerifier HybridVerifier 
/** Remove the sink's output file after each test and verify it is gone. */
@After
public void after() {
  if (outFile != null) {
    outFile.delete();
    assertTrue(!outFile.exists());
  }
}

Class: org.apache.hadoop.metrics2.util.TestMetricsCache

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** An updated record must be retrievable by name and tags with its metric. */
@SuppressWarnings("deprecation")
@Test
public void testGet() {
  final MetricsCache cache = new MetricsCache();
  // Nothing cached yet.
  assertNull("empty", cache.get("r", Arrays.asList(makeTag("t", "t"))));
  final MetricsRecord mr = makeRecord("r",
      Arrays.asList(makeTag("t", "t")),
      Arrays.asList(makeMetric("m", 1)));
  cache.update(mr);
  final MetricsCache.Record cr = cache.get("r", mr.tags());
  LOG.debug("tags=" + mr.tags() + " cr=" + cr);
  assertNotNull("Got record", cr);
  assertEquals("contains 1 metric", 1, cr.metrics().size());
  checkMetricValue("new metric value", cr, "m", 1);
}

Class: org.apache.hadoop.metrics2.util.TestSampleQuantiles

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Check that counts and quantile estimates are correctly reset after a
 * call to {@link SampleQuantiles#clear()}.
 */
@Test
public void testClear() throws IOException {
  for (int i = 0; i < 1000; i++) {
    estimator.insert(i);
  }
  estimator.clear();
  // JUnit's assertEquals takes (expected, actual); the original reversed
  // them, which garbles failure messages.
  assertEquals(0, estimator.getCount());
  assertEquals(0, estimator.getSampleCount());
  assertNull(estimator.snapshot());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Check that the counts of the number of items in the window and sample
 * are incremented correctly as items are added.
 */
@Test
public void testCount() throws IOException {
  // JUnit's assertEquals takes (expected, actual); the original reversed
  // them, which garbles failure messages.
  assertEquals(0, estimator.getCount());
  assertEquals(0, estimator.getSampleCount());
  assertNull(estimator.snapshot());
  estimator.insert(1337);
  assertEquals(1, estimator.getCount());
  // The sample buffer is only flushed on snapshot().
  estimator.snapshot();
  assertEquals(1, estimator.getSampleCount());
  assertEquals("50.00 %ile +/- 5.00%: 1337\n"
      + "75.00 %ile +/- 2.50%: 1337\n"
      + "90.00 %ile +/- 1.00%: 1337\n"
      + "95.00 %ile +/- 0.50%: 1337\n"
      + "99.00 %ile +/- 0.10%: 1337", estimator.toString());
}

Class: org.apache.hadoop.net.TestDNS

UtilityVerifier EqualityVerifier HybridVerifier 
/** Getting the IP addresses of an unknown interface must fail cleanly. */
@Test
public void testIPsOfUnknownInterface() throws Exception {
  try {
    DNS.getIPs("name-of-an-unknown-interface");
    fail("Got an IP for a bogus interface");
  } catch (UnknownHostException e) {
    assertEquals("No such interface name-of-an-unknown-interface",
        e.getMessage());
  }
}

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test that repeated calls to getting the local host are fairly fast, and
 * hence that caching is being used.
 * @throws Exception if hostname lookups fail
 */
@Test
public void testGetLocalHostIsFast() throws Exception {
  final String hostname1 = DNS.getDefaultHost(DEFAULT);
  assertNotNull(hostname1);
  final String hostname2 = DNS.getDefaultHost(DEFAULT);
  final long start = Time.now();
  final String hostname3 = DNS.getDefaultHost(DEFAULT);
  final long end = Time.now();
  // All three lookups must agree.
  assertEquals(hostname3, hostname2);
  assertEquals(hostname2, hostname1);
  // A cached lookup should return essentially instantly.
  final long interval = end - start;
  assertTrue("Took too long to determine local host - caching is not working",
      interval < 20000);
}

Class: org.apache.hadoop.net.TestNetUtils

UtilityVerifier EqualityVerifier HybridVerifier 
/** Exercises createSocketAddr with host:port, bare host, and an unparseable port. */
@Test
public void testCreateSocketAddress() throws Throwable {
  InetSocketAddress sockAddr =
      NetUtils.createSocketAddr("127.0.0.1:12345", 1000, "myconfig");
  assertEquals("127.0.0.1", sockAddr.getAddress().getHostAddress());
  assertEquals(12345, sockAddr.getPort());
  // No port in the string: the supplied default port is applied.
  sockAddr = NetUtils.createSocketAddr("127.0.0.1", 1000, "myconfig");
  assertEquals("127.0.0.1", sockAddr.getAddress().getHostAddress());
  assertEquals(1000, sockAddr.getPort());
  try {
    sockAddr = NetUtils.createSocketAddr("127.0.0.1:blahblah", 1000, "myconfig");
    fail("Should have failed to parse bad port");
  } catch (IllegalArgumentException iae) {
    assertInException(iae, "myconfig");
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Test that we can't accidentally connect back to the connecting socket due
 * to a quirk in the TCP spec.
 * This is a regression test for HADOOP-6722.
 */
@Test
public void testAvoidLoopbackTcpSockets() throws Exception {
  Configuration conf = new Configuration();
  Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
  sock.bind(new InetSocketAddress("127.0.0.1", 0));
  System.err.println("local address: " + sock.getLocalAddress());
  System.err.println("local port: " + sock.getLocalPort());
  try {
    // Attempt a self-connect: destination equals the socket's own endpoint.
    NetUtils.connect(sock,
        new InetSocketAddress(sock.getLocalAddress(), sock.getLocalPort()), 20000);
    sock.close();
    fail("Should not have connected");
  } catch (ConnectException ce) {
    System.err.println("Got exception: " + ce);
    assertTrue(ce.getMessage().contains("resulted in a loopback"));
  } catch (SocketException se) {
    assertTrue(se.getMessage().contains("Invalid argument"));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Test for {@link NetUtils#normalizeHostNames} */
@Test
public void testNormalizeHostName() {
  List input = Arrays.asList(new String[] {
      "127.0.0.1", "localhost", "1.kanyezone.appspot.com", "UnknownHost123"});
  List normalized = NetUtils.normalizeHostNames(input);
  // An IP address normalizes to itself.
  assertEquals(normalized.get(0), input.get(0));
  // "localhost" is rewritten to the loopback IP.
  assertFalse(normalized.get(1).equals(input.get(1)));
  assertEquals(normalized.get(1), input.get(0));
  // A resolvable hostname is rewritten ...
  assertFalse(normalized.get(2).equals(input.get(2)));
  // ... while an unresolvable one passes through unchanged.
  assertEquals(normalized.get(3), input.get(3));
}

Class: org.apache.hadoop.net.TestNetworkTopology

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Remove every datanode, verify the topology is empty, then restore it. */
@Test
public void testRemove() throws Exception {
  for (int i = 0; i < dataNodes.length; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < dataNodes.length; i++) {
    assertFalse(cluster.contains(dataNodes[i]));
  }
  assertEquals(0, cluster.getNumOfLeaves());
  // Re-add the nodes so later tests see the original topology.
  for (int i = 0; i < dataNodes.length; i++) {
    cluster.add(dataNodes[i]);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A datanode registered with an invalid network location must not leave a
 * bad topology cached in the namenode; after remapping the host onto a valid
 * rack and restarting, both datanodes should report the same location.
 */
@Test(timeout=180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    String racks[] = {"/a/b", "/c"};
    String hosts[] = {"foo1.example.com", "foo2.example.com"};
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
        .racks(racks).hosts(hosts).build();
    cluster.waitActive();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    Assert.assertNotNull(nn);
    // Wait until exactly one datanode is live (the other was rejected).
    DatanodeInfo[] info;
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      Assert.assertFalse(info.length == 2);
      if (info.length == 1) {
        break;
      }
      Thread.sleep(1000);
    }
    int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
    int invalidIdx = validIdx == 1 ? 0 : 1;
    // Re-map the rejected host onto the valid rack and restart it.
    StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
    LOG.info("datanode " + validIdx + " came up with network location "
        + info[0].getNetworkLocation());
    cluster.restartDataNode(invalidIdx);
    Thread.sleep(5000);
    // Wait until both datanodes are live again.
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      if (info.length == 2) {
        break;
      }
      if (info.length == 0) {
        LOG.info("got no valid DNs");
      } else if (info.length == 1) {
        LOG.info("got one valid DN: " + info[0].getHostName()
            + " (at " + info[0].getNetworkLocation() + ")");
      }
      Thread.sleep(1000);
    }
    Assert.assertEquals(info[0].getNetworkLocation(), info[1].getNetworkLocation());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Adding a bare node at rack level must raise InvalidTopologyException. */
@Test
public void testCreateInvalidTopology() throws Exception {
  NetworkTopology invalCluster = new NetworkTopology();
  DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")};
  invalCluster.add(invalDataNodes[0]);
  invalCluster.add(invalDataNodes[1]);
  try {
    // Third node sits directly under /d1, conflicting with the racks there.
    invalCluster.add(invalDataNodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains(
        "You cannot have a rack and a non-rack node at the same "
        + "level of the network topology."));
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** This test checks that chooseRandom works for an excluded rack. */
@Test
public void testChooseRandomExcludedRack() {
  Map frequency = pickNodesAtRandom(100, "~" + "/d2");
  for (int i = 0; i < dataNodes.length; i++) {
    int count = frequency.get(dataNodes[i]);
    if (dataNodes[i].getNetworkLocation().startsWith("/d2")) {
      // Nodes on the excluded rack must never be picked.
      assertEquals(0, count);
    } else {
      assertTrue(count > 0);
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verify the rack count and pairwise same-rack relationships of the fixture. */
@Test
public void testRacks() throws Exception {
  assertEquals(cluster.getNumOfRacks(), 6);
  assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
  assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
  assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
  assertTrue(cluster.isOnSameRack(dataNodes[3], dataNodes[4]));
  assertFalse(cluster.isOnSameRack(dataNodes[4], dataNodes[5]));
  assertTrue(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
}

Class: org.apache.hadoop.net.TestNetworkTopologyWithNodeGroup

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verify rack count and pairwise same-node-group relationships of the fixture. */
@Test
public void testNodeGroups() throws Exception {
  assertEquals(3, cluster.getNumOfRacks());
  assertTrue(cluster.isOnSameNodeGroup(dataNodes[0], dataNodes[1]));
  assertFalse(cluster.isOnSameNodeGroup(dataNodes[1], dataNodes[2]));
  assertFalse(cluster.isOnSameNodeGroup(dataNodes[2], dataNodes[3]));
  assertTrue(cluster.isOnSameNodeGroup(dataNodes[3], dataNodes[4]));
  assertFalse(cluster.isOnSameNodeGroup(dataNodes[4], dataNodes[5]));
  assertFalse(cluster.isOnSameNodeGroup(dataNodes[5], dataNodes[6]));
  assertFalse(cluster.isOnSameNodeGroup(dataNodes[6], dataNodes[7]));
}

BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * This test checks that adding a node with invalid topology will be failed
 * with an exception to show topology is invalid.
 */
@Test
public void testAddNodeWithInvalidTopology() {
  try {
    cluster.add(rackOnlyNode);
    fail("Exception should be thrown, so we should not have reached here.");
  } catch (Exception e) {
    // Must be an IllegalArgumentException with the expected message.
    if (!(e instanceof IllegalArgumentException)) {
      fail("Expecting IllegalArgumentException, but caught:" + e);
    }
    assertTrue(e.getMessage().contains("illegal network location"));
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verify the node-group topology's rack count and same-rack relationships. */
@Test
public void testRacks() throws Exception {
  assertEquals(3, cluster.getNumOfRacks());
  assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
  assertTrue(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
  assertFalse(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
  assertTrue(cluster.isOnSameRack(dataNodes[3], dataNodes[4]));
  assertTrue(cluster.isOnSameRack(dataNodes[4], dataNodes[5]));
  assertFalse(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
  assertTrue(cluster.isOnSameRack(dataNodes[6], dataNodes[7]));
}

Class: org.apache.hadoop.net.TestSocketIOWithTimeout

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * End-to-end check of SocketInputStream/SocketOutputStream over a pipe:
 * basic read/write, timeout behaviour via doIO, interruption of a blocked
 * read, and close/EOF semantics.
 */
@Test
public void testSocketIOWithTimeout() throws Exception {
  Pipe pipe = Pipe.open();
  Pipe.SourceChannel source = pipe.source();
  Pipe.SinkChannel sink = pipe.sink();
  try {
    final InputStream in = new SocketInputStream(source, TIMEOUT);
    OutputStream out = new SocketOutputStream(sink, TIMEOUT);
    byte[] writeBytes = TEST_STRING.getBytes();
    byte[] readBytes = new byte[writeBytes.length];
    // High-bit byte guards against sign-extension bugs in read().
    byte byteWithHighBit = (byte) 0x80;
    out.write(writeBytes);
    out.write(byteWithHighBit);
    doIO(null, out, TIMEOUT);
    in.read(readBytes);
    assertTrue(Arrays.equals(writeBytes, readBytes));
    assertEquals(byteWithHighBit & 0xff, in.read());
    doIO(in, null, TIMEOUT);
    ((SocketInputStream) in).setTimeout(TIMEOUT * 2);
    doIO(in, null, TIMEOUT * 2);
    ((SocketInputStream) in).setTimeout(0);
    // A read blocked with no timeout must be interruptible.
    TestingThread thread = new TestingThread(ctx) {
      @Override
      public void doWork() throws Exception {
        try {
          in.read();
          fail("Did not fail with interrupt");
        } catch (InterruptedIOException ste) {
          // Fixed typo in the log message: "expection" -> "exception".
          LOG.info("Got exception while reading as expected : "
              + ste.getMessage());
        }
      }
    };
    ctx.addThread(thread);
    ctx.startThreads();
    Thread.sleep(1000);
    thread.interrupt();
    ctx.stop();
    // Interrupting a read must not close the underlying channels.
    assertTrue(source.isOpen());
    assertTrue(sink.isOpen());
    if (!Shell.WINDOWS && !Shell.PPC_64) {
      try {
        out.write(1);
        fail("Did not throw");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("stream is closed", ioe);
      }
    }
    out.close();
    assertFalse(sink.isOpen());
    assertEquals(-1, in.read());
    in.close();
    assertFalse(source.isOpen());
  } finally {
    if (source != null) {
      source.close();
    }
    if (sink != null) {
      sink.close();
    }
  }
}

Class: org.apache.hadoop.net.TestStaticMapping

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Verify that a configuration string builds a topology */
@Test
public void testReadNodesFromConfig() throws Throwable {
  StaticMapping mapping = newInstance();
  Configuration conf = new Configuration();
  conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=/r1,n2=/r2");
  mapping.setConf(conf);
  assertSingleSwitch(mapping);
  List queries = new ArrayList(3);
  queries.add("n1");
  queries.add("unknown");
  queries.add("n2");
  List resolved = mapping.resolve(queries);
  assertEquals(3, resolved.size());
  assertEquals("/r1", resolved.get(0));
  // Hosts absent from the mapping fall back to the default rack.
  assertEquals(NetworkTopology.DEFAULT_RACK, resolved.get(1));
  assertEquals("/r2", resolved.get(2));
  // The switch map only contains explicitly configured hosts.
  Map switchMap = mapping.getSwitchMap();
  String topology = mapping.dumpTopology();
  LOG.info(topology);
  assertEquals(topology, 2, switchMap.size());
  assertEquals(topology, "/r1", switchMap.get("n1"));
  assertNull(topology, switchMap.get("unknown"));
}

Class: org.apache.hadoop.net.unix.TestDomainSocket

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test setting some server options.
 * @throws IOException
 */
@Test(timeout=180000)
public void testServerOptions() throws Exception {
  final String TEST_PATH =
      new File(sockDir.getDir(), "test_sock_server_options").getAbsolutePath();
  DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
  try {
    // Halve the receive buffer and confirm the new value is readable back.
    int bufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    int newBufSize = bufSize / 2;
    serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, newBufSize);
    int nextBufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    Assert.assertEquals(newBufSize, nextBufSize);
    // Set a short receive timeout so accept() fails fast below.
    int newTimeout = 1000;
    serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT, newTimeout);
    int nextTimeout = serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
    Assert.assertEquals(newTimeout, nextTimeout);
    try {
      serv.accept();
      Assert.fail("expected the accept() to time out and fail");
    } catch (SocketTimeoutException e) {
      GenericTestUtils.assertExceptionContains("accept(2) error: ", e);
    }
  } finally {
    serv.close();
    Assert.assertFalse(serv.isOpen());
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test file descriptor passing.
 * @throws IOException
 */
@Test(timeout=180000)
public void testFdPassing() throws Exception {
  final String TEST_PATH = new File(sockDir.getDir(), "test_sock").getAbsolutePath();
  final byte clientMsg1[] = new byte[] {0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
  final byte serverMsg1[] = new byte[] {0x31, 0x30, 0x32, 0x34, 0x31, 0x33, 0x44,
      0x1, 0x1, 0x1, 0x1, 0x1};
  final ArrayBlockingQueue threadResults = new ArrayBlockingQueue(2);
  final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
  final PassedFile passedFiles[] =
      new PassedFile[] {new PassedFile(1), new PassedFile(2)};
  final FileDescriptor passedFds[] = new FileDescriptor[passedFiles.length];
  for (int i = 0; i < passedFiles.length; i++) {
    passedFds[i] = passedFiles[i].getInputStream().getFD();
  }
  // Server side: read the client's message, reply with fds attached.
  Thread serverThread = new Thread() {
    public void run() {
      DomainSocket conn = null;
      try {
        conn = serv.accept();
        byte in1[] = new byte[clientMsg1.length];
        InputStream connInputStream = conn.getInputStream();
        IOUtils.readFully(connInputStream, in1, 0, in1.length);
        Assert.assertTrue(Arrays.equals(clientMsg1, in1));
        DomainSocket domainConn = (DomainSocket) conn;
        domainConn.sendFileDescriptors(passedFds, serverMsg1, 0, serverMsg1.length);
        conn.close();
      } catch (Throwable e) {
        threadResults.add(e);
        Assert.fail(e.getMessage());
      }
      threadResults.add(new Success());
    }
  };
  serverThread.start();
  // Client side: send a message, then receive the reply plus the passed fds.
  Thread clientThread = new Thread() {
    public void run() {
      try {
        DomainSocket client = DomainSocket.connect(TEST_PATH);
        OutputStream clientOutputStream = client.getOutputStream();
        InputStream clientInputStream = client.getInputStream();
        clientOutputStream.write(clientMsg1);
        DomainSocket domainConn = (DomainSocket) client;
        byte in1[] = new byte[serverMsg1.length];
        FileInputStream recvFis[] = new FileInputStream[passedFds.length];
        int r = domainConn.recvFileInputStreams(recvFis, in1, 0, in1.length - 1);
        Assert.assertTrue(r > 0);
        IOUtils.readFully(clientInputStream, in1, r, in1.length - r);
        Assert.assertTrue(Arrays.equals(serverMsg1, in1));
        for (int i = 0; i < passedFds.length; i++) {
          Assert.assertNotNull(recvFis[i]);
          passedFiles[i].checkInputStream(recvFis[i]);
        }
        for (FileInputStream fis : recvFis) {
          fis.close();
        }
        client.close();
      } catch (Throwable e) {
        threadResults.add(e);
      }
      threadResults.add(new Success());
    }
  };
  clientThread.start();
  // Both threads report either a Success marker or the Throwable they hit.
  for (int i = 0; i < 2; i++) {
    Throwable t = threadResults.take();
    if (!(t instanceof Success)) {
      Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
    }
  }
  serverThread.join(120000);
  clientThread.join(120000);
  serv.close();
  for (PassedFile pf : passedFiles) {
    pf.cleanup();
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** shutdown() must unblock a reader with EOF after all buffered bytes are read. */
@Test(timeout=180000)
public void testShutdown() throws Exception {
  final AtomicInteger bytesRead = new AtomicInteger(0);
  final AtomicBoolean failed = new AtomicBoolean(false);
  final DomainSocket[] socks = DomainSocket.socketpair();
  // Reader loops until EOF (read() == -1) or an I/O error.
  Runnable reader = new Runnable() {
    @Override
    public void run() {
      while (true) {
        try {
          int ret = socks[1].getInputStream().read();
          if (ret == -1) return;
          bytesRead.addAndGet(1);
        } catch (IOException e) {
          DomainSocket.LOG.error("reader error", e);
          failed.set(true);
          return;
        }
      }
    }
  };
  Thread readerThread = new Thread(reader);
  readerThread.start();
  socks[0].getOutputStream().write(1);
  socks[0].getOutputStream().write(2);
  socks[0].getOutputStream().write(3);
  Assert.assertTrue(readerThread.isAlive());
  socks[0].shutdown();
  readerThread.join();
  // The reader must have seen clean EOF, not an error.
  Assert.assertFalse(failed.get());
  Assert.assertEquals(3, bytesRead.get());
  IOUtils.cleanup(null, socks);
}

TestInitializer AssumptionSetter HybridVerifier 
/** Skip these tests when the native domain-socket library failed to load. */
@Before
public void before() {
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}

Class: org.apache.hadoop.net.unix.TestDomainSocketWatcher

TestInitializer AssumptionSetter HybridVerifier 
/** Skip these tests when the native domain-socket library failed to load. */
@Before
public void before() {
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}

Class: org.apache.hadoop.nfs.TestNfsExports

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An exact-address "rw" export grants READ_WRITE only to that address. */
@Test
public void testExactAddressRW() {
  NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1 + " rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,
      matcher.getAccessPrivilege(address1, hostname1));
  // A different address must not receive READ_WRITE.
  Assert.assertFalse(
      AccessPrivilege.READ_WRITE == matcher.getAccessPrivilege(address2, hostname1));
}

Class: org.apache.hadoop.nfs.nfs3.TestIdUserGroup

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** IDs above Integer.MAX_VALUE must map through their signed-int representation. */
@Test
public void testIdOutOfIntegerRange() throws IOException {
  String GET_ALL_USERS_CMD = "echo \""
      + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n"
      + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"
      + "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"
      + "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"
      + "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"
      + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
      + " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD = "echo \""
      + "hdfs:*:11501:hrt_hdfs\n"
      + "rpcuser:*:29:\n"
      + "nfsnobody:*:4294967294:\n"
      + "nfsnobody1:*:4294967295:\n"
      + "maxint:*:2147483647:\n"
      + "minint:*:2147483648:\n"
      + "mapred3:x:498\""
      + " | cut -d: -f1,3";
  BiMap uMap = HashBiMap.create();
  BiMap gMap = HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  assertTrue(uMap.size() == 7);
  // 4294967294/4294967295 appear as -2/-1; 2147483648 as Integer.MIN_VALUE.
  assertEquals("nfsnobody", uMap.get(-2));
  assertEquals("nfsnobody1", uMap.get(-1));
  assertEquals("maxint", uMap.get(2147483647));
  assertEquals("minint", uMap.get(-2147483648));
  assertEquals("archivebackup", uMap.get(1031));
  assertEquals("hdfs", uMap.get(11501));
  assertEquals("daemon", uMap.get(2));
  IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  assertTrue(gMap.size() == 7);
  assertEquals("hdfs", gMap.get(11501));
  assertEquals("rpcuser", gMap.get(29));
  assertEquals("nfsnobody", gMap.get(-2));
  assertEquals("nfsnobody1", gMap.get(-1));
  assertEquals("maxint", gMap.get(2147483647));
  assertEquals("minint", gMap.get(-2147483648));
  assertEquals("mapred3", gMap.get(498));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Duplicate user/group names and ids must be deduplicated deterministically. */
@Test
public void testDuplicates() throws IOException {
  String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
      + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "bin:x:2:2:bin:/bin:/bin/sh\n"
      + "bin:x:1:1:bin:/bin:/sbin/nologin\n"
      + "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
      + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
      + " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
      + "mapred:x:497\n"
      + "mapred2:x:497\n"
      + "mapred:x:498\n"
      + "mapred3:x:498\""
      + " | cut -d: -f1,3";
  BiMap uMap = HashBiMap.create();
  BiMap gMap = HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  // 9 input lines collapse to 5 unique (name, id) pairs.
  assertEquals(5, uMap.size());
  assertEquals("root", uMap.get(0));
  assertEquals("hdfs", uMap.get(11501));
  assertEquals("hdfs2", uMap.get(11502));
  assertEquals("bin", uMap.get(2));
  assertEquals("daemon", uMap.get(1));
  IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  assertTrue(gMap.size() == 3);
  assertEquals("hdfs", gMap.get(11501));
  assertEquals("mapred", gMap.get(497));
  assertEquals("mapred3", gMap.get(498));
}

Class: org.apache.hadoop.oncrpc.TestFrameDecoder

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A non-final fragment buffers; the final fragment yields the whole message. */
@Test
public void testMultipleFrames() {
  RpcFrameDecoder decoder = new RpcFrameDecoder();
  // Fragment 1: last-fragment bit clear, payload length 10.
  byte[] fragment1 = new byte[4 + 10];
  fragment1[0] = 0;
  fragment1[1] = 0;
  fragment1[2] = 0;
  fragment1[3] = (byte) 10;
  assertFalse(XDR.isLastFragment(fragment1));
  assertTrue(XDR.fragmentSize(fragment1) == 10);
  ByteBuffer buffer = ByteBuffer.allocate(4 + 10);
  buffer.put(fragment1);
  buffer.flip();
  ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer);
  ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode(
      Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), buf);
  // The message is incomplete, so the decoder emits nothing yet.
  assertTrue(channelBuffer == null);
  // Fragment 2: last-fragment bit set, payload length 10.
  byte[] fragment2 = new byte[4 + 10];
  fragment2[0] = (byte) (1 << 7);
  fragment2[1] = 0;
  fragment2[2] = 0;
  fragment2[3] = (byte) 10;
  assertTrue(XDR.isLastFragment(fragment2));
  assertTrue(XDR.fragmentSize(fragment2) == 10);
  buffer = ByteBuffer.allocate(4 + 10);
  buffer.put(fragment2);
  buffer.flip();
  buf = new ByteBufferBackedChannelBuffer(buffer);
  channelBuffer = (ChannelBuffer) decoder.decode(
      Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), buf);
  assertTrue(channelBuffer != null);
  // Both 10-byte payloads are delivered together.
  assertEquals(20, channelBuffer.readableBytes());
}

Class: org.apache.hadoop.oncrpc.TestRpcCallCache

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** A fresh CacheEntry is in-progress; setResponse moves it to completed. */
@Test
public void testCacheEntry() {
  CacheEntry entry = new CacheEntry();
  validateInprogressCacheEntry(entry);
  assertTrue(entry.isInProgress());
  assertFalse(entry.isCompleted());
  assertNull(entry.getResponse());
  RpcResponse response = mock(RpcResponse.class);
  entry.setResponse(response);
  validateCompletedCacheEntry(entry, response);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * LRU behaviour of RpcCallCache: capacity 10, oldest entries evicted first,
 * and re-adding an existing entry returns its cached in-progress state.
 */
@Test
public void testCacheFunctionality() throws UnknownHostException {
  RpcCallCache cache = new RpcCallCache("Test", 10);
  int size = 0;
  for (int clientId = 0; clientId < 20; clientId++) {
    InetAddress clientIp = InetAddress.getByName("1.1.1." + clientId);
    System.out.println("Adding " + clientIp);
    cache.checkOrAddToCache(clientIp, 0);
    // The cache grows until it hits its capacity of 10.
    size = Math.min(++size, 10);
    System.out.println("Cache size " + cache.size());
    assertEquals(size, cache.size());
    int startEntry = Math.max(clientId - 10 + 1, 0);
    // Reconstructed generic type; the extracted text had the invalid "Iterator>".
    Iterator<Map.Entry<ClientRequest, CacheEntry>> iterator = cache.iterator();
    for (int i = 0; i < size; i++) {
      ClientRequest key = iterator.next().getKey();
      System.out.println("Entry " + key.getClientId());
      // Entries iterate oldest-first, starting at the eviction boundary.
      assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),
          key.getClientId());
    }
    for (int i = 0; i < size; i++) {
      CacheEntry e = cache.checkOrAddToCache(
          InetAddress.getByName("1.1.1." + (startEntry + i)), 0);
      assertNotNull(e);
      assertTrue(e.isInProgress());
      assertFalse(e.isCompleted());
    }
  }
}

Class: org.apache.hadoop.oncrpc.security.TestRpcAuthInfo

EqualityVerifier ExceptionVerifier HybridVerifier 
/** fromValue(4) maps to no defined auth flavor and must throw. */
@Test(expected=IllegalArgumentException.class)
public void testInvalidAuthFlavor() {
  assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(4));
}

Class: org.apache.hadoop.security.TestCredentials

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trip a Credentials object (two tokens plus ten secret keys) through
 * write()/readFields() and verify everything survives serialization.
 */
@SuppressWarnings("unchecked")
@Test
public void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
  Credentials ts = new Credentials();
  Token token1 = new Token();
  Token token2 = new Token();
  Text service1 = new Text("service1");
  Text service2 = new Text("service2");
  Collection services = new ArrayList();
  services.add(service1);
  services.add(service2);
  token1.setService(service1);
  token2.setService(service2);
  ts.addToken(new Text("sometoken1"), token1);
  ts.addToken(new Text("sometoken2"), token2);
  final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
  String alias = "alias";
  // Keep a local copy of the keys to compare against after the round trip.
  // Generic type restored; the extracted text had raw Map, which breaks the
  // typed for-each below.
  Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
  for (int i = 0; i < 10; i++) {
    Key key = kg.generateKey();
    m.put(new Text(alias + i), key.getEncoded());
    ts.addSecretKey(new Text(alias + i), key.getEncoded());
  }
  File tmpFileName = new File(tmpDir, "tokenStorageTest");
  DataOutputStream dos = new DataOutputStream(new FileOutputStream(tmpFileName));
  ts.write(dos);
  dos.close();
  DataInputStream dis = new DataInputStream(new FileInputStream(tmpFileName));
  ts = new Credentials();
  ts.readFields(dis);
  dis.close();
  // Reconstructed generic type; the extracted text had the invalid "Collection>".
  Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
  assertEquals("getAllTokens should return collection of size 2", list.size(), 2);
  boolean foundFirst = false;
  boolean foundSecond = false;
  for (Token token : list) {
    if (token.getService().equals(service1)) {
      foundFirst = true;
    }
    if (token.getService().equals(service2)) {
      foundSecond = true;
    }
  }
  assertTrue("Tokens for services service1 and service2 must be present",
      foundFirst && foundSecond);
  int mapLen = m.size();
  assertEquals("wrong number of keys in the Storage", mapLen,
      ts.numberOfSecretKeys());
  for (Text a : m.keySet()) {
    byte[] kTS = ts.getSecretKey(a);
    byte[] kLocal = m.get(a);
    assertTrue("keys don't match for " + a,
        WritableComparator.compareBytes(kTS, 0, kTS.length,
            kLocal, 0, kLocal.length) == 0);
  }
  tmpFileName.delete();
}

IterativeVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** Tokens added via Credentials are retrievable as the same instances from the UGI. */
@Test
public void testAddTokensToUGI() {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
  Credentials creds = new Credentials();
  for (int i = 0; i < service.length; i++) {
    creds.addToken(service[i], token[i]);
  }
  ugi.addCredentials(creds);
  creds = ugi.getCredentials();
  for (int i = 0; i < service.length; i++) {
    // assertSame: identity, not mere equality.
    assertSame(token[i], creds.getToken(service[i]));
  }
  assertEquals(service.length, creds.numberOfTokens());
}

Class: org.apache.hadoop.security.TestGroupsCaching

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Cached group lookups are served even while the underlying mapping is blacklisted. */
@Test
public void testGroupsCaching() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
  Groups groups = new Groups(conf);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  FakeGroupMapping.addToBlackList("user1");
  // "me" is not blacklisted, so this lookup succeeds and populates the cache.
  assertTrue(groups.getGroups("me").size() == 2);
  // Blacklisting "me" now has no effect: the cached entry is served.
  FakeGroupMapping.addToBlackList("me");
  assertTrue(groups.getGroups("me").size() == 2);
  try {
    LOG.error("We are not supposed to get here."
        + groups.getGroups("user1").toString());
    fail();
  } catch (IOException ioe) {
    if (!ioe.getMessage().startsWith("No groups found")) {
      LOG.error("Got unexpected exception: " + ioe.getMessage());
      fail();
    }
  }
  FakeGroupMapping.clearBlackList();
  assertTrue(groups.getGroups("user1").size() == 2);
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** A failed lookup is negatively cached until its cache entry expires. */
@Test
public void testNegativeGroupCaching() throws Exception {
  final String user = "negcache";
  final String failMessage = "Did not throw IOException: ";
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
  FakeTimer timer = new FakeTimer();
  Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.addToBlackList(user);
  try {
    groups.getGroups(user);
    fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No groups found for user", e);
  }
  try {
    groups.getGroups(user);
    fail(failMessage + "The user is in the negative cache.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No groups found for user", e);
  }
  // Even after the mapping recovers, the negative entry still applies.
  FakeGroupMapping.clearBlackList();
  try {
    groups.getGroups(user);
    fail(failMessage + "The user is still in the negative cache, even "
        + "FakeGroupMapping has resumed.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No groups found for user", e);
  }
  // Advance past the 2-second negative-cache TTL; the lookup now succeeds.
  timer.advance(4 * 1000);
  assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
}

Class: org.apache.hadoop.security.TestJNIGroupsMapping

TestInitializer AssumptionSetter HybridVerifier 
/** Skip these tests unless the Hadoop native library is loaded. */
@Before
public void isNativeCodeLoaded() {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
}

Class: org.apache.hadoop.security.TestPermission

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** End-to-end check of HDFS permission enforcement: create, chmod, chown, access. */
@Test
public void testFilePermission() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  try {
    FileSystem nnfs = FileSystem.get(conf);
    assertFalse(nnfs.exists(CHILD_FILE1));
    // Mutations on a nonexistent file must fail with FileNotFoundException.
    try {
      nnfs.setOwner(CHILD_FILE1, "foo", "bar");
      assertTrue(false);
    } catch (java.io.FileNotFoundException e) {
      LOG.info("GOOD: got " + e);
    }
    try {
      nnfs.setPermission(CHILD_FILE1, new FsPermission((short) 0777));
      assertTrue(false);
    } catch (java.io.FileNotFoundException e) {
      LOG.info("GOOD: got " + e);
    }
    // Explicit permission on create is honored.
    FSDataOutputStream out = nnfs.create(CHILD_FILE1,
        new FsPermission((short) 0777), true, 1024, (short) 1, 1024, null);
    FileStatus status = nnfs.getFileStatus(CHILD_FILE1);
    assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
    nnfs.delete(CHILD_FILE1, false);
    // Default create permission.
    nnfs.mkdirs(CHILD_DIR1);
    out = nnfs.create(CHILD_FILE1);
    status = nnfs.getFileStatus(CHILD_FILE1);
    assertTrue(status.getPermission().toString().equals("rw-r--r--"));
    byte data[] = new byte[FILE_LEN];
    RAN.nextBytes(data);
    out.write(data);
    out.close();
    nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));
    status = nnfs.getFileStatus(CHILD_FILE1);
    assertTrue(status.getPermission().toString().equals("rwx------"));
    // File contents survive the chmod round trips.
    byte dataIn[] = new byte[FILE_LEN];
    FSDataInputStream fin = nnfs.open(CHILD_FILE1);
    int bytesRead = fin.read(dataIn);
    assertTrue(bytesRead == FILE_LEN);
    for (int i = 0; i < FILE_LEN; i++) {
      assertEquals(data[i], dataIn[i]);
    }
    nnfs.setPermission(CHILD_FILE1, new FsPermission("755"));
    status = nnfs.getFileStatus(CHILD_FILE1);
    assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
    nnfs.setPermission(CHILD_FILE1, new FsPermission("744"));
    status = nnfs.getFileStatus(CHILD_FILE1);
    assertTrue(status.getPermission().toString().equals("rwxr--r--"));
    nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));
    // A non-superuser must be denied where the mode bits forbid access.
    UserGroupInformation userGroupInfo =
        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
    FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
    userfs.mkdirs(CHILD_DIR1);
    assertTrue(!canMkdirs(userfs, CHILD_DIR2));
    assertTrue(!canCreate(userfs, CHILD_FILE2));
    assertTrue(!canOpen(userfs, CHILD_FILE1));
    // Open up the tree so the rename below can succeed.
    nnfs.setPermission(ROOT_PATH, new FsPermission((short) 0755));
    nnfs.setPermission(CHILD_DIR1, new FsPermission("777"));
    nnfs.setPermission(new Path("/"), new FsPermission((short) 0777));
    final Path RENAME_PATH = new Path("/foo/bar");
    userfs.mkdirs(RENAME_PATH);
    assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.security.TestProxyUserFromEnv

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test HADOOP_PROXY_USER for impersonation */ @Test public void testProxyUserFromEnvironment() throws IOException { String proxyUser="foo.bar"; System.setProperty(UserGroupInformation.HADOOP_PROXY_USER,proxyUser); UserGroupInformation ugi=UserGroupInformation.getLoginUser(); assertEquals(proxyUser,ugi.getUserName()); UserGroupInformation realUgi=ugi.getRealUser(); assertNotNull(realUgi); Process pp=Runtime.getRuntime().exec("whoami"); BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream())); String realUser=br.readLine().trim(); int backslashIndex=realUser.indexOf('\\'); if (backslashIndex != -1) { realUser=realUser.substring(backslashIndex + 1); } assertEquals(realUser,realUgi.getUserName()); }

Class: org.apache.hadoop.security.TestRefreshUserMappings

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Group lookups are cached (same result twice), change after an explicit
 * {@code -refreshUserToGroupsMappings}, and change again once the cache
 * refresh timeout elapses.
 */
@Test
public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refreshUserToGroupsMappings"};
  Groups groups = Groups.getUserToGroupsMappingService(config);
  String user = UserGroupInformation.getCurrentUser().getUserName();

  System.out.println("first attempt:");
  List<String> g1 = groups.getGroups(user);
  // The original reused a g1-sized String[] for every list and discarded the
  // toArray(T[]) return value, so later prints could show stale/truncated
  // contents; print each list directly instead.
  System.out.println(Arrays.toString(g1.toArray()));

  System.out.println("second attempt, should be same:");
  List<String> g2 = groups.getGroups(user);
  System.out.println(Arrays.toString(g2.toArray()));
  for (int i = 0; i < g2.size(); i++) {
    assertEquals("Should be same group ", g1.get(i), g2.get(i));
  }

  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List<String> g3 = groups.getGroups(user);
  System.out.println(Arrays.toString(g3.toArray()));
  for (int i = 0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i),
        g1.get(i).equals(g3.get(i)));
  }

  // Wait slightly past the cache timeout so the next lookup misses the cache.
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4 = groups.getGroups(user);
  System.out.println(Arrays.toString(g4.toArray()));
  for (int i = 0; i < g4.size(); i++) {
    assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
  }
}

Class: org.apache.hadoop.security.TestUGIWithExternalKdc

APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Logs in from a keytab against the external KDC and verifies the resulting
 * authentication method; a principal absent from the keytab must fail.
 */
@Test
public void testLogin() throws IOException {
  final String principal = System.getProperty("user.principal");
  final String keytab = System.getProperty("user.keytab");
  Assert.assertNotNull("User principal was not specified", principal);
  Assert.assertNotNull("User keytab was not specified", keytab);

  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  UserGroupInformation ugi =
      UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
  Assert.assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());

  // A principal the keytab does not contain must be rejected by the KDC.
  try {
    UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", keytab);
    Assert.fail("Login should have failed");
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}

TestInitializer AssumptionSetter HybridVerifier 
// Runs before each test: skip the whole test via a JUnit assumption unless an
// external KDC is actually reachable in this environment
// (isExternalKdcRunning() is defined elsewhere in this class).
@Before public void testExternalKdcRunning(){ Assume.assumeTrue(isExternalKdcRunning()); }

Class: org.apache.hadoop.security.TestUGIWithSecurityOn

TestInitializer AssumptionSetter HybridVerifier 
// Runs before each test: skip the whole test via a JUnit assumption unless a
// KDC is running (isKdcRunning() is defined elsewhere in this class).
@Before public void testKdcRunning(){ Assume.assumeTrue(isKdcRunning()); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Keytab logins for both an NN service principal and a plain user principal
 * must yield KERBEROS auth; a bogus principal must fail to log in.
 */
@Test
public void testLogin() throws IOException {
  String kdcDir = System.getProperty("kdc.resource.dir");
  String nnKeytab = kdcDir + "/keytabs/nn1.keytab";
  String userKeytab = kdcDir + "/keytabs/user1.keytab";

  Configuration conf = new Configuration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  UserGroupInformation.setConfiguration(conf);

  UserGroupInformation nnUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      "nn1/localhost@EXAMPLE.COM", nnKeytab);
  UserGroupInformation userUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      "user1@EXAMPLE.COM", userKeytab);
  Assert.assertEquals(AuthenticationMethod.KERBEROS, nnUgi.getAuthenticationMethod());
  Assert.assertEquals(AuthenticationMethod.KERBEROS, userUgi.getAuthenticationMethod());

  // A principal the keytab does not contain must be rejected.
  try {
    UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", nnKeytab);
    Assert.fail("Login should have failed");
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A UGI rebuilt from a kerberos Subject must, inside doAs(), report the
 * kerberos principal as the current user name.
 */
@Test
public void testGetUGIFromKerberosSubject() throws IOException {
  String user1keyTabFilepath = System.getProperty("kdc.resource.dir") + "/keytabs/user1.keytab";
  UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      "user1@EXAMPLE.COM", user1keyTabFilepath);
  Set<KerberosPrincipal> principals = ugi.getSubject().getPrincipals(KerberosPrincipal.class);
  if (principals.isEmpty()) {
    Assert.fail("There should be a kerberos principal in the subject.");
  } else {
    UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(ugi.getSubject());
    if (ugi2 != null) {
      ugi2.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
          try {
            UserGroupInformation ugi3 = UserGroupInformation.getCurrentUser();
            String doAsUserName = ugi3.getUserName();
            // Fixed: the original passed the arguments in (actual, expected)
            // order, which produces a misleading failure message.
            assertEquals("user1@EXAMPLE.COM", doAsUserName);
            System.out.println("DO AS USERNAME: " + doAsUserName);
          } catch (IOException e) {
            e.printStackTrace();
          }
          return null;
        }
      });
    }
  }
}

Class: org.apache.hadoop.security.TestUserGroupInformation

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * given user name - get all the groups.
 * Needs to happen before creating the test users.
 * Compares the OS-reported user/groups with what the login UGI reports, and
 * checks that a fake remote user has no server-side groups.
 */
@Test(timeout = 30000)
public void testGetServerSideGroups() throws IOException, InterruptedException {
  // Ask the OS who we are.
  Process pp = Runtime.getRuntime().exec("whoami");
  BufferedReader br = new BufferedReader(new InputStreamReader(pp.getInputStream()));
  String userName;
  try {
    userName = br.readLine().trim();
  } finally {
    br.close(); // the original leaked this reader
  }
  if (Shell.WINDOWS) {
    // Strip a DOMAIN\ prefix and lowercase, to match UGI's short-name form.
    int sp = userName.lastIndexOf('\\');
    if (sp != -1) {
      userName = userName.substring(sp + 1);
    }
    userName = userName.toLowerCase();
  }

  // Ask the OS for our group list.
  pp = Runtime.getRuntime().exec(
      Shell.WINDOWS ? Shell.WINUTILS + " groups -F" : "id -Gn");
  br = new BufferedReader(new InputStreamReader(pp.getInputStream()));
  String line;
  try {
    line = br.readLine();
  } finally {
    br.close(); // the original leaked this reader
  }
  System.out.println(userName + ":" + line);

  Set<String> groups = new LinkedHashSet<String>();
  for (String s : line.split(Shell.TOKEN_SEPARATOR_REGEX)) {
    groups.add(s);
  }

  // The login UGI must agree with the OS on user name and group membership.
  final UserGroupInformation login = UserGroupInformation.getCurrentUser();
  String loginUserName = login.getShortUserName();
  if (Shell.WINDOWS) {
    loginUserName = loginUserName.toLowerCase();
  }
  assertEquals(userName, loginUserName);
  String[] gi = login.getGroupNames();
  assertEquals(groups.size(), gi.length);
  for (int i = 0; i < gi.length; i++) {
    assertTrue(groups.contains(gi[i]));
  }

  // A fake remote user is distinct from the login user and has no groups.
  final UserGroupInformation fakeUser = UserGroupInformation.createRemoteUser("foo.bar");
  fakeUser.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws IOException {
      UserGroupInformation current = UserGroupInformation.getCurrentUser();
      assertFalse(current.equals(login));
      assertEquals(current, fakeUser);
      assertEquals(0, current.getGroupNames().length);
      return null;
    }
  });
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/** A Subject holding a KerberosPrincipal must yield a UGI named after it. */
@Test(timeout = 30000)
public void testGetUGIFromSubject() throws Exception {
  Subject subject = new Subject();
  subject.getPrincipals().add(new KerberosPrincipal("guest"));
  UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);
  assertNotNull(ugi);
  // The default realm is appended to the bare principal name.
  assertEquals("guest@DEFAULT.REALM", ugi.getUserName());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a test user must install default auth_to_local rules when none are
 * set, and must not clobber rules that were set explicitly.
 */
@Test(timeout = 30000)
public void testEnsureInitWithRules() throws IOException {
  final String rules = "RULE:[1:RULE1]";

  // Fresh state: no rules; creating a user triggers default initialization.
  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertTrue(KerberosName.hasRulesBeenSet());

  // Explicitly set rules must survive a subsequent user creation.
  UserGroupInformation.reset();
  KerberosName.setRules(rules);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules, KerberosName.getRules());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules, KerberosName.getRules());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * setConfiguration() must (re)apply auth_to_local rules from the config, and
 * user creation must never overwrite rules that are already set.
 */
@Test(timeout = 30000)
public void testSetConfigWithRules() {
  final String[] rules = {"RULE:[1:TEST1]", "RULE:[1:TEST2]", "RULE:[1:TEST3]"};

  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  KerberosName.setRules(rules[0]);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules[0], KerberosName.getRules());

  // Creating a user must not clobber explicitly set rules.
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[0], KerberosName.getRules());

  // Each setConfiguration() call picks up the latest configured rules.
  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[1]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[1], KerberosName.getRules());

  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[2]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[2], KerberosName.getRules());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[2], KerberosName.getRules());
}

UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testUGITokens() throws Exception { UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"}); Token t1=mock(Token.class); when(t1.getService()).thenReturn(new Text("t1")); Token t2=mock(Token.class); when(t2.getService()).thenReturn(new Text("t2")); Credentials creds=new Credentials(); byte[] secretKey=new byte[]{}; Text secretName=new Text("shhh"); creds.addSecretKey(secretName,secretKey); ugi.addToken(t1); ugi.addToken(t2); ugi.addCredentials(creds); Collection> z=ugi.getTokens(); assertTrue(z.contains(t1)); assertTrue(z.contains(t2)); assertEquals(2,z.size()); Credentials ugiCreds=ugi.getCredentials(); assertSame(secretKey,ugiCreds.getSecretKey(secretName)); assertEquals(1,ugiCreds.numberOfSecretKeys()); try { z.remove(t1); fail("Shouldn't be able to modify token collection from UGI"); } catch ( UnsupportedOperationException uoe) { } Collection> otherSet=ugi.doAs(new PrivilegedExceptionAction>>(){ @Override public Collection> run() throws IOException { return UserGroupInformation.getCurrentUser().getTokens(); } } ); assertTrue(otherSet.contains(t1)); assertTrue(otherSet.contains(t2)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test login method: the current user is the login user and has at least one
 * group; inside doAs() the impersonated user is reported instead. The type
 * parameter on PrivilegedExceptionAction (stripped by the report extraction,
 * leaving an uncompilable raw doAs assignment) is reconstructed.
 */
@Test(timeout = 30000)
public void testLogin() throws Exception {
  conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
      String.valueOf(PERCENTILES_INTERVAL));
  UserGroupInformation.setConfiguration(conf);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  assertEquals(UserGroupInformation.getCurrentUser(), UserGroupInformation.getLoginUser());
  assertTrue(ugi.getGroupNames().length >= 1);
  verifyGroupMetrics(1);

  // Inside doAs(), getCurrentUser() reports the impersonated test user.
  UserGroupInformation userGroupInfo =
      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  UserGroupInformation curUGI =
      userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>() {
        @Override
        public UserGroupInformation run() throws IOException {
          return UserGroupInformation.getCurrentUser();
        }
      });
  assertEquals(curUGI, userGroupInfo);
  assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Proxy UGIs sharing one Subject are equal; a plain remote UGI is not. */
@Test(timeout = 30000)
public void testEqualsWithRealUser() throws Exception {
  UserGroupInformation realUgi1 =
      UserGroupInformation.createUserForTesting("RealUser", GROUP_NAMES);
  UserGroupInformation proxyUgi1 =
      UserGroupInformation.createProxyUser(USER_NAME, realUgi1);
  // Rebuilding from the same Subject yields an equal UGI.
  UserGroupInformation proxyUgi2 = new UserGroupInformation(proxyUgi1.getSubject());
  UserGroupInformation remoteUgi = UserGroupInformation.createRemoteUser(USER_NAME);
  assertEquals(proxyUgi1, proxyUgi2);
  assertFalse(remoteUgi.equals(proxyUgi1));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** createRemoteUser() records the requested authentication method. */
@Test(timeout = 30000)
public void testCreateRemoteUser() {
  UserGroupInformation simpleUgi = UserGroupInformation.createRemoteUser("user1");
  assertEquals(AuthenticationMethod.SIMPLE, simpleUgi.getAuthenticationMethod());
  assertTrue(simpleUgi.toString().contains("(auth:SIMPLE)"));

  UserGroupInformation kerberosUgi =
      UserGroupInformation.createRemoteUser("user1", AuthMethod.KERBEROS);
  assertEquals(AuthenticationMethod.KERBEROS, kerberosUgi.getAuthenticationMethod());
  assertTrue(kerberosUgi.toString().contains("(auth:KERBEROS)"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testTokenIdentifiers() throws Exception { UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"}); TokenIdentifier t1=mock(TokenIdentifier.class); TokenIdentifier t2=mock(TokenIdentifier.class); ugi.addTokenIdentifier(t1); ugi.addTokenIdentifier(t2); Collection z=ugi.getTokenIdentifiers(); assertTrue(z.contains(t1)); assertTrue(z.contains(t2)); assertEquals(2,z.size()); Collection otherSet=ugi.doAs(new PrivilegedExceptionAction>(){ @Override public Collection run() throws IOException { return UserGroupInformation.getCurrentUser().getTokenIdentifiers(); } } ); assertTrue(otherSet.contains(t1)); assertTrue(otherSet.contains(t2)); assertEquals(2,otherSet.size()); }

IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): GetTokenThread (declared elsewhere in this class) concurrently
// iterates the current user's tokens while this loop adds 100 mock tokens; the
// thread's `cme` field is presumably set when it observes a
// ConcurrentModificationException — confirm against the thread class.
/** * This test checks a race condition between getting and adding tokens for * the current user. Calling UserGroupInformation.getCurrentUser() returns * a new object each time, so simply making these methods synchronized was not * enough to prevent race conditions and causing a * ConcurrentModificationException. These methods are synchronized on the * Subject, which is the same object between UserGroupInformation instances. * This test tries to cause a CME, by exposing the race condition. Previously * this test would fail every time; now it does not. */ @Test public void testTokenRaceCondition() throws Exception { UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); userGroupInfo.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { assertNotEquals(UserGroupInformation.getLoginUser(),UserGroupInformation.getCurrentUser()); GetTokenThread thread=new GetTokenThread(); try { thread.start(); for (int i=0; i < 100; i++) { @SuppressWarnings("unchecked") Token t=mock(Token.class); when(t.getService()).thenReturn(new Text("t" + i)); UserGroupInformation.getCurrentUser().addToken(t); assertNull("ConcurrentModificationException encountered",thread.cme); } } catch ( ConcurrentModificationException cme) { cme.printStackTrace(); fail("ConcurrentModificationException encountered"); } finally { thread.runThread=false; thread.join(5 * 1000); } return null; } } ); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** UGI equality follows the underlying Subject, not the user name. */
@Test(timeout = 30000)
public void testEquals() throws Exception {
  UserGroupInformation uugi =
      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertEquals(uugi, uugi);

  // Same name and groups but a distinct Subject: not equal, different hash.
  UserGroupInformation ugi2 =
      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertFalse(uugi.equals(ugi2));
  assertFalse(uugi.hashCode() == ugi2.hashCode());

  // Same Subject: equal and same hash.
  UserGroupInformation ugi3 = new UserGroupInformation(uugi.getSubject());
  assertEquals(uugi, ugi3);
  assertEquals(uugi.hashCode(), ugi3.hashCode());
}

Class: org.apache.hadoop.security.alias.TestCredShell

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An unknown provider scheme must fail with the "no valid providers" message. */
@Test
public void testInvalidProvider() throws Exception {
  CredentialShell cs = new CredentialShell();
  cs.setConf(new Configuration());
  String[] args1 = {"create", "credential1", "-value", "p@ssw0rd",
      "-provider", "sdff://file/tmp/credstore.jceks"};
  int rc = cs.run(args1);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains(
      "There are no valid " + "CredentialProviders configured."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Create then delete a credential using the interactive password prompt. */
@Test
public void testPromptForCredential() throws Exception {
  // The mock reader supplies the password and its confirmation.
  ArrayList passwords = new ArrayList();
  passwords.add("p@ssw0rd");
  passwords.add("p@ssw0rd");
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  shell.setPasswordReader(new MockPasswordReader(passwords));

  String[] createArgs = {"create", "credential1", "-provider",
      "jceks://file" + tmpDir + "/credstore.jceks"};
  int rc = shell.run(createArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains(
      "credential1 has been successfully " + "created."));

  String[] deleteArgs = {"delete", "credential1", "-provider",
      "jceks://file" + tmpDir + "/credstore.jceks"};
  rc = shell.run(deleteArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains(
      "credential1 has been successfully " + "deleted."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "user:///" as the only configured provider is rejected for create. */
@Test
public void testTransientProviderOnlyConfig() throws Exception {
  CredentialShell cs = new CredentialShell();
  Configuration config = new Configuration();
  config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
  cs.setConf(config);

  String[] args1 = {"create", "credential1"};
  int rc = cs.run(args1);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains(
      "There are no valid " + "CredentialProviders configured."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Full create → list → delete → list lifecycle against a jceks provider. */
@Test
public void testCredentialSuccessfulLifecycle() throws Exception {
  outContent.reset();
  final String provider = "jceks://file" + tmpDir + "/credstore.jceks";
  CredentialShell cs = new CredentialShell();
  cs.setConf(new Configuration());

  int rc = cs.run(new String[]{
      "create", "credential1", "-value", "p@ssw0rd", "-provider", provider});
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains(
      "credential1 has been successfully " + "created."));

  outContent.reset();
  rc = cs.run(new String[]{"list", "-provider", provider});
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains("credential1"));

  outContent.reset();
  rc = cs.run(new String[]{"delete", "credential1", "-provider", provider});
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains(
      "credential1 has been successfully " + "deleted."));

  // After deletion the credential must no longer be listed.
  outContent.reset();
  rc = cs.run(new String[]{"list", "-provider", provider});
  assertEquals(0, rc);
  assertFalse(outContent.toString(), outContent.toString().contains("credential1"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Modifying the transient user:/// provider succeeds but prints a warning. */
@Test
public void testTransientProviderWarning() throws Exception {
  CredentialShell cs = new CredentialShell();
  cs.setConf(new Configuration());

  int rc = cs.run(new String[]{
      "create", "credential1", "-value", "p@ssw0rd", "-provider", "user:///"});
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains(
      "WARNING: you are modifying a " + "transient provider."));

  rc = cs.run(new String[]{"delete", "credential1", "-provider", "user:///"});
  assertEquals(outContent.toString(), 0, rc);
  assertTrue(outContent.toString().contains(
      "credential1 has been successfully " + "deleted."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A null first password followed by a non-matching one must be rejected. */
@Test
public void testPromptForCredentialWithEmptyPasswd() throws Exception {
  ArrayList passwords = new ArrayList();
  passwords.add(null);
  passwords.add("p@ssw0rd");
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  shell.setPasswordReader(new MockPasswordReader(passwords));

  int rc = shell.run(new String[]{"create", "credential1", "-provider",
      "jceks://file" + tmpDir + "/credstore.jceks"});
  assertEquals(outContent.toString(), 1, rc);
  assertTrue(outContent.toString().contains("Passwords don't match"));
}

Class: org.apache.hadoop.security.alias.TestCredentialProviderFactory

BooleanVerifier EqualityVerifier HybridVerifier 
/** An unknown provider scheme must surface an IOException with a clear message. */
@Test
public void testFactoryErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "unknown:///");
  try {
    // Fixed: use fail() instead of assertTrue("…", false); the unused
    // `providers` local is dropped — the call itself is what must throw.
    CredentialProviderFactory.getProviders(conf);
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("No CredentialProviderFactory for unknown:/// in "
        + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, e.getMessage());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** A syntactically invalid provider URI must surface an IOException. */
@Test
public void testUriErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "unkn@own:/x/y");
  try {
    // Fixed: use fail() instead of assertTrue("…", false); the unused
    // `providers` local is dropped — the call itself is what must throw.
    CredentialProviderFactory.getProviders(conf);
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("Bad configuration of "
        + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH
        + " at unkn@own:/x/y", e.getMessage());
  }
}

Class: org.apache.hadoop.security.authentication.client.TestAuthenticatedURL

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testExtractTokenFail() throws Exception { HttpURLConnection conn=Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_UNAUTHORIZED); String tokenStr="foo"; Map> headers=new HashMap>(); List cookies=new ArrayList(); cookies.add(AuthenticatedURL.AUTH_COOKIE + "=" + tokenStr); headers.put("Set-Cookie",cookies); Mockito.when(conn.getHeaderFields()).thenReturn(headers); AuthenticatedURL.Token token=new AuthenticatedURL.Token(); token.set("bar"); try { AuthenticatedURL.extractToken(conn,token); Assert.fail(); } catch ( AuthenticationException ex) { Assert.assertFalse(token.isSet()); } catch ( Exception ex) { Assert.fail(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Token.isSet()/toString() reflect whether a token string was supplied. */
@Test
public void testToken() throws Exception {
  AuthenticatedURL.Token empty = new AuthenticatedURL.Token();
  Assert.assertFalse(empty.isSet());

  AuthenticatedURL.Token populated = new AuthenticatedURL.Token("foo");
  Assert.assertTrue(populated.isSet());
  Assert.assertEquals("foo", populated.toString());
}

Class: org.apache.hadoop.security.authentication.client.TestKerberosAuthenticator

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An unauthenticated request must get 401 plus a WWW-Authenticate challenge. */
@Test(timeout = 60000)
public void testNotAuthenticated() throws Exception {
  AuthenticatorTestCase auth = new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(
      getAuthenticationHandlerConfiguration());
  auth.start();
  try {
    URL url = new URL(auth.getBaseURL());
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
    // Fixed: assertNotNull instead of assertTrue(x != null) for a clearer failure.
    Assert.assertNotNull(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE));
  } finally {
    auth.stop();
  }
}

Class: org.apache.hadoop.security.authentication.server.TestAuthenticationFilter

UtilityVerifier EqualityVerifier HybridVerifier 
/** init() with no parameters at all must fail with a ServletException. */
@Test
public void testInitEmpty() throws Exception {
  AuthenticationFilter filter = new AuthenticationFilter();
  try {
    FilterConfig config = Mockito.mock(FilterConfig.class);
    // No init parameters at all — in particular no auth type.
    Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements());
    filter.init(config);
    Assert.fail();
  } catch (ServletException ex) {
    Assert.assertEquals(
        "Authentication type must be specified: simple|kerberos|", ex.getMessage());
  } catch (Exception ex) {
    Assert.fail();
  } finally {
    filter.destroy();
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
// Expired-token path of AuthenticationFilter.getToken(): a correctly signed
// AUTH_COOKIE whose expiry lies in the past must raise "AuthenticationToken
// expired". The `failed` flag asserted in the finally block guarantees the
// exception branch actually ran.
@Test public void testGetTokenExpired() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); AuthenticationToken token=new AuthenticationToken("u","p",DummyAuthenticationHandler.TYPE); token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie}); boolean failed=false; try { filter.getToken(request); } catch ( AuthenticationException ex) { Assert.assertEquals("AuthenticationToken expired",ex.getMessage()); failed=true; } finally { Assert.assertTrue("token not expired",failed); } } finally { filter.destroy(); } }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// init() configuration matrix, one try/finally per scenario: (1) default
// simple handler — random secret, non-custom provider, no cookie domain/path,
// configured validity; (2) explicit signature secret — not random; (3) an
// externally supplied SignerSecretProvider via the servlet-context attribute —
// custom, not random; (4) cookie domain/path settings; (5) a custom handler's
// init/destroy lifecycle flags; (6) the kerberos handler, whose init throws
// here but must still have instantiated the handler before failing.
@Test public void testInit() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn((new Long(TOKEN_VALIDITY_SEC)).toString()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertEquals(PseudoAuthenticationHandler.class,filter.getAuthenticationHandler().getClass()); Assert.assertTrue(filter.isRandomSecret()); Assert.assertFalse(filter.isCustomSignerSecretProvider()); Assert.assertNull(filter.getCookieDomain()); Assert.assertNull(filter.getCookiePath()); Assert.assertEquals(TOKEN_VALIDITY_SEC,filter.getValidity()); } finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertFalse(filter.isRandomSecret()); Assert.assertFalse(filter.isCustomSignerSecretProvider()); } 
finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(new SignerSecretProvider(){ @Override public void init( Properties config, long tokenValidity){ } @Override public byte[] getCurrentSecret(){ return null; } @Override public byte[][] getAllSecrets(){ return null; } } ); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertFalse(filter.isRandomSecret()); Assert.assertTrue(filter.isCustomSignerSecretProvider()); } finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com"); Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.COOKIE_DOMAIN,AuthenticationFilter.COOKIE_PATH)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertEquals(".foo.com",filter.getCookieDomain()); 
Assert.assertEquals("/bar",filter.getCookiePath()); } finally { filter.destroy(); } DummyAuthenticationHandler.reset(); filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertTrue(DummyAuthenticationHandler.init); } finally { filter.destroy(); Assert.assertTrue(DummyAuthenticationHandler.destroy); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements()); filter.init(config); } catch ( ServletException ex) { } finally { Assert.assertEquals(KerberosAuthenticationHandler.class,filter.getAuthenticationHandler().getClass()); filter.destroy(); } }

APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies filter behavior when the authentication handler rejects a request:
 * the filter must respond 403 with the handler's message ("AUTH FAILED"),
 * must NOT invoke the rest of the filter chain (the doAnswer on chain.doFilter
 * fails the test if reached), must NOT emit a WWW-Authenticate challenge, and
 * must clear the auth cookie (Set-Cookie captured via the doAnswer on
 * addHeader and parsed into cookieMap; the cookie value must be empty).
 * Uses the file's DummyAuthenticationHandler, whose "management.operation.return"
 * init parameter drives the failure path.
 */
@Test public void testDoFilterAuthenticationFailure() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar")); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{}); Mockito.when(request.getHeader("WWW-Authenticate")).thenReturn("dummyauth"); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); FilterChain chain=Mockito.mock(FilterChain.class); final HashMap cookieMap=new HashMap(); Mockito.doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocation) throws Throwable { Object[] args=invocation.getArguments(); parseCookieMap((String)args[1],cookieMap); return null; } } ).when(response).addHeader(Mockito.eq("Set-Cookie"),Mockito.anyString()); Mockito.doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocation) throws Throwable { Assert.fail("shouldn't get here"); return null; } } ).when(chain).doFilter(Mockito.anyObject(),Mockito.anyObject()); filter.doFilter(request,response,chain); Mockito.verify(response).sendError(HttpServletResponse.SC_FORBIDDEN,"AUTH FAILED"); 
Mockito.verify(response,Mockito.never()).setHeader(Mockito.eq("WWW-Authenticate"),Mockito.anyString()); String value=cookieMap.get(AuthenticatedURL.AUTH_COOKIE); Assert.assertNotNull("cookie missing",value); Assert.assertEquals("",value); } finally { filter.destroy(); } }

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that getToken() rejects a correctly-signed cookie whose token type
 * does not match the configured handler: a token of type "invalidtype" is
 * signed with the same shared secret the filter was initialized with, placed
 * in the AUTH_COOKIE, and getToken() must throw AuthenticationException with
 * message "Invalid AuthenticationToken type". The boolean flag + finally
 * pattern asserts the exception path was actually taken.
 */
@Test public void testGetTokenInvalidType() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); AuthenticationToken token=new AuthenticationToken("u","p","invalidtype"); token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie}); boolean failed=false; try { filter.getToken(request); } catch ( AuthenticationException ex) { Assert.assertEquals("Invalid AuthenticationToken type",ex.getMessage()); failed=true; } finally { Assert.assertTrue("token not invalid type",failed); } } finally { filter.destroy(); } }

Class: org.apache.hadoop.security.authentication.server.TestAuthenticationToken

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the AuthenticationToken accessors and the expiry check: the
 * getters must echo the constructor/setter values, and isExpired() must
 * flip from false to true once the wall clock passes the expiry time.
 */
@Test
public void testGetters() throws Exception {
  long expiryTime = System.currentTimeMillis() + 50;
  AuthenticationToken tok = new AuthenticationToken("u", "p", "t");
  tok.setExpires(expiryTime);
  Assert.assertEquals("u", tok.getUserName());
  Assert.assertEquals("p", tok.getName());
  Assert.assertEquals("t", tok.getType());
  Assert.assertEquals(expiryTime, tok.getExpires());
  Assert.assertFalse(tok.isExpired());
  Thread.sleep(70); // outlive the 50ms validity window
  Assert.assertTrue(tok.isExpired());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a token through toString()/parse() and checks that name,
 * type and expiry survive serialization, including expiry behavior over
 * time (not expired before the deadline, expired after).
 */
@Test
public void testToStringAndParse() throws Exception {
  long expiry = System.currentTimeMillis() + 50;
  AuthenticationToken original = new AuthenticationToken("u", "p", "t");
  original.setExpires(expiry);
  String serialized = original.toString();
  AuthenticationToken parsed = AuthenticationToken.parse(serialized);
  Assert.assertEquals("p", parsed.getName());
  Assert.assertEquals("t", parsed.getType());
  Assert.assertEquals(expiry, parsed.getExpires());
  Assert.assertFalse(parsed.isExpired());
  Thread.sleep(70); // let the 50ms validity window lapse
  Assert.assertTrue(parsed.isExpired());
}

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the ANONYMOUS singleton token: it must exist, carry null
 * identity fields, use -1 as the sentinel expiry, and never be
 * considered expired.
 */
@Test
public void testAnonymous() {
  Assert.assertNotNull(AuthenticationToken.ANONYMOUS);
  // assertNull is the idiomatic form of assertEquals(null, ...) and
  // produces a clearer failure message.
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getUserName());
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getName());
  Assert.assertNull(AuthenticationToken.ANONYMOUS.getType());
  Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
  Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
}

Class: org.apache.hadoop.security.authentication.server.TestKerberosAuthenticationHandler

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the handler's NAME_RULES property installs Kerberos
 * name-translation rules globally (KerberosName.setRules is static state):
 * after init with a rule that only maps @BAR principals, "bar@BAR" resolves
 * to the short name "bar" while "bar@FOO" has no matching rule and
 * getShortName() throws. The empty catch around handler.init is deliberate:
 * init may fail for unrelated reasons in this environment, but the rules are
 * still set. NOTE(review): this test mutates static KerberosName rules and
 * relies on DEFAULT as a fallback rule — confirm against KerberosName docs.
 */
@Test(timeout=60000) public void testNameRules() throws Exception { KerberosName kn=new KerberosName(KerberosTestUtils.getServerPrincipal()); Assert.assertEquals(KerberosTestUtils.getRealm(),kn.getRealm()); handler.destroy(); KerberosName.setRules("RULE:[1:$1@$0](.*@FOO)s/@.*//\nDEFAULT"); handler=getNewAuthenticationHandler(); Properties props=getDefaultProperties(); props.setProperty(KerberosAuthenticationHandler.NAME_RULES,"RULE:[1:$1@$0](.*@BAR)s/@.*//\nDEFAULT"); try { handler.init(props); } catch ( Exception ex) { } kn=new KerberosName("bar@BAR"); Assert.assertEquals("bar",kn.getShortName()); kn=new KerberosName("bar@FOO"); try { kn.getShortName(); Assert.fail(); } catch ( Exception ex) { } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * With dynamic principal discovery enabled (PRINCIPAL = "*"), init()
 * must fail with a ServletException whose cause explains that the
 * keytab does not contain the expected principals; any other exception
 * type fails the test.
 */
@Test(timeout = 60000)
public void testDynamicPrincipalDiscoveryMissingPrincipals() throws Exception {
  String[] keytabPrincipals = new String[] {"hdfs/localhost"};
  String keytabPath = KerberosTestUtils.getKeytabFile();
  getKdc().createPrincipal(new File(keytabPath), keytabPrincipals);
  handler.destroy();
  Properties conf = new Properties();
  conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabPath);
  conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
  handler = getNewAuthenticationHandler();
  try {
    handler.init(conf);
    Assert.fail("init should have failed");
  } catch (ServletException ex) {
    Assert.assertEquals("Principals do not exist in the keytab",
        ex.getCause().getMessage());
  } catch (Throwable t) {
    Assert.fail("wrong exception: " + t);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After setup, the handler must report the configured keytab file and
 * hold exactly one principal: the server principal from the test utils.
 */
@Test(timeout = 60000)
public void testInit() throws Exception {
  Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
  Set loadedPrincipals = handler.getPrincipals();
  Principal serverPrincipal =
      new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
  Assert.assertTrue(loadedPrincipals.contains(serverPrincipal));
  Assert.assertEquals(1, loadedPrincipals.size());
}

Class: org.apache.hadoop.security.authentication.util.TestKerberosUtil

APIUtilityVerifier BranchVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getPrincipalNames(keytab, pattern) must return exactly the test
 * principals that match the pattern: every matching principal is
 * present, and the result size equals the number of matches.
 */
@Test
public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
  createKeyTab(testKeytab, testPrincipals);
  Pattern httpPattern = Pattern.compile("HTTP/.*");
  String[] matched = KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
  Assert.assertNotNull("principals cannot be null", matched);
  List matchedList = Arrays.asList(matched);
  int matchCount = 0;
  for (String principal : testPrincipals) {
    if (!httpPattern.matcher(principal).matches()) {
      continue; // non-HTTP principals must be filtered out
    }
    Assert.assertTrue("missing principal " + principal,
        matchedList.contains(principal));
    matchCount++;
  }
  Assert.assertEquals(matchCount, matched.length);
}

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Without a pattern, getPrincipalNames(keytab) must return every test
 * principal written to the keytab and nothing else.
 */
@Test
public void testGetPrincipalNamesFromKeytab() throws IOException {
  createKeyTab(testKeytab, testPrincipals);
  String[] names = KerberosUtil.getPrincipalNames(testKeytab);
  Assert.assertNotNull("principals cannot be null", names);
  List nameList = Arrays.asList(names);
  int expectedCount = 0;
  for (String principal : testPrincipals) {
    Assert.assertTrue("missing principal " + principal,
        nameList.contains(principal));
    expectedCount++;
  }
  Assert.assertEquals(expectedCount, names.length);
}

Class: org.apache.hadoop.security.authentication.util.TestRandomSignerSecretProvider

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies secret rollover in RandomSignerSecretProvider: the provider is
 * seeded, and this test predicts its output by drawing the same sequence
 * from a Random with the identical seed (secret1..secret3). After init the
 * current secret is secret1 with no previous secret (allSecrets[1] is null);
 * after each rollover window (sleep = frequency + 2s slack) the current
 * secret advances and the previous one shifts into allSecrets[1].
 * Timing-sensitive by design; the 2s slack absorbs scheduler jitter.
 */
@Test public void testGetAndRollSecrets() throws Exception { long rolloverFrequency=15 * 1000; long seed=System.currentTimeMillis(); Random rand=new Random(seed); byte[] secret1=Long.toString(rand.nextLong()).getBytes(); byte[] secret2=Long.toString(rand.nextLong()).getBytes(); byte[] secret3=Long.toString(rand.nextLong()).getBytes(); RandomSignerSecretProvider secretProvider=new RandomSignerSecretProvider(seed); try { secretProvider.init(null,rolloverFrequency); byte[] currentSecret=secretProvider.getCurrentSecret(); byte[][] allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret1,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret1,allSecrets[0]); Assert.assertNull(allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret2,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret2,allSecrets[0]); Assert.assertArrayEquals(secret1,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret3,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret3,allSecrets[0]); Assert.assertArrayEquals(secret2,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); } finally { secretProvider.destroy(); } }

Class: org.apache.hadoop.security.authentication.util.TestRolloverSignerSecretProvider

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the generic RolloverSignerSecretProvider rollover contract using
 * a test subclass (TRolloverSignerSecretProvider) preloaded with three known
 * secrets. Immediately after init the current secret is secret1 and there is
 * no previous secret (allSecrets[1] is null); after each rollover window
 * (sleep = frequency + 2s slack) the current secret advances by one and the
 * prior secret moves to allSecrets[1]. Timing-sensitive by design.
 */
@Test public void testGetAndRollSecrets() throws Exception { long rolloverFrequency=15 * 1000; byte[] secret1="doctor".getBytes(); byte[] secret2="who".getBytes(); byte[] secret3="tardis".getBytes(); TRolloverSignerSecretProvider secretProvider=new TRolloverSignerSecretProvider(new byte[][]{secret1,secret2,secret3}); try { secretProvider.init(null,rolloverFrequency); byte[] currentSecret=secretProvider.getCurrentSecret(); byte[][] allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret1,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret1,allSecrets[0]); Assert.assertNull(allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret2,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret2,allSecrets[0]); Assert.assertArrayEquals(secret1,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret3,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret3,allSecrets[0]); Assert.assertArrayEquals(secret2,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); } finally { secretProvider.destroy(); } }

Class: org.apache.hadoop.security.authentication.util.TestSigner

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies Signer behavior across secret rotation using a controllable
 * TestSignerSecretProvider: (1) signatures are produced with the current
 * secret only, so adding a previous secret does not change the signature of
 * the same payload (s1 equals s2); (2) after rotating current->previous
 * (B becomes previous, C current), a signature made under B still verifies
 * because the previous secret is consulted; (3) once B ages out entirely
 * (current D, previous C), verifying the old signature throws
 * SignerException — the empty catch is the expected-failure path.
 */
@Test public void testMultipleSecrets() throws Exception { TestSignerSecretProvider secretProvider=new TestSignerSecretProvider(); Signer signer=new Signer(secretProvider); secretProvider.setCurrentSecret("secretB"); String t1="test"; String s1=signer.sign(t1); String e1=signer.verifyAndExtract(s1); Assert.assertEquals(t1,e1); secretProvider.setPreviousSecret("secretA"); String t2="test"; String s2=signer.sign(t2); String e2=signer.verifyAndExtract(s2); Assert.assertEquals(t2,e2); Assert.assertEquals(s1,s2); secretProvider.setCurrentSecret("secretC"); secretProvider.setPreviousSecret("secretB"); String t3="test"; String s3=signer.sign(t3); String e3=signer.verifyAndExtract(s3); Assert.assertEquals(t3,e3); Assert.assertNotEquals(s1,s3); String e1b=signer.verifyAndExtract(s1); Assert.assertEquals(t1,e1b); secretProvider.setCurrentSecret("secretD"); secretProvider.setPreviousSecret("secretC"); try { signer.verifyAndExtract(s1); Assert.fail(); } catch ( SignerException ex) { } }

Class: org.apache.hadoop.security.authorize.TestAccessControlList

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the AccessControlList addUser/addGroup and removeUser/removeGroup
 * API: after each mutation the user/group collections and the serialized ACL
 * string must reflect the change. Fixes the original's reversed
 * expected/actual argument order (JUnit convention is expected first) and
 * re-fetches the users collection before the final size check instead of
 * relying on a previously obtained reference being a live view.
 */
@Test
public void testAddRemoveAPI() {
  AccessControlList acl;
  Collection users;
  Collection groups;

  // Empty ACL: " " means no users, no groups.
  acl = new AccessControlList(" ");
  assertEquals(0, acl.getUsers().size());
  assertEquals(0, acl.getGroups().size());
  assertEquals(" ", acl.getAclString());

  acl.addUser("drwho");
  users = acl.getUsers();
  assertEquals(1, users.size());
  assertEquals("drwho", users.iterator().next());
  assertEquals("drwho ", acl.getAclString());

  acl.addGroup("tardis");
  groups = acl.getGroups();
  assertEquals(1, groups.size());
  assertEquals("tardis", groups.iterator().next());
  assertEquals("drwho tardis", acl.getAclString());

  acl.addUser("joe");
  acl.addGroup("users");
  users = acl.getUsers();
  assertEquals(2, users.size());
  Iterator iter = users.iterator();
  assertEquals("drwho", iter.next());
  assertEquals("joe", iter.next());
  groups = acl.getGroups();
  assertEquals(2, groups.size());
  iter = groups.iterator();
  assertEquals("tardis", iter.next());
  assertEquals("users", iter.next());
  assertEquals("drwho,joe tardis,users", acl.getAclString());

  acl.removeUser("joe");
  acl.removeGroup("users");
  users = acl.getUsers();
  assertEquals(1, users.size());
  assertFalse(users.contains("joe"));
  groups = acl.getGroups();
  assertEquals(1, groups.size());
  assertFalse(groups.contains("users"));
  assertEquals("drwho tardis", acl.getAclString());

  acl.removeGroup("tardis");
  groups = acl.getGroups();
  assertEquals(0, groups.size());
  assertFalse(groups.contains("tardis"));
  assertEquals("drwho ", acl.getAclString());

  acl.removeUser("drwho");
  // Re-fetch rather than reusing the collection obtained before the removal.
  users = acl.getUsers();
  assertEquals(0, users.size());
  assertFalse(users.contains("drwho"));
  assertEquals(0, acl.getGroups().size());
  assertEquals(0, acl.getUsers().size());
  assertEquals(" ", acl.getAclString());
}

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Adding or removing the wild card (" * ") as a user or group is illegal:
 * each of the four operations must throw IllegalArgumentException. The
 * thrown Throwable is captured and inspected rather than declared, so a
 * missing exception is reported as a plain assertion failure.
 */
@Test
public void testAddRemoveWildCard() {
  AccessControlList acl = new AccessControlList("drwho tardis");
  Throwable thrown;

  thrown = null;
  try {
    acl.addUser(" * ");
  } catch (Throwable t) {
    thrown = t;
  }
  assertNotNull(thrown);
  assertTrue(thrown instanceof IllegalArgumentException);

  thrown = null;
  try {
    acl.addGroup(" * ");
  } catch (Throwable t) {
    thrown = t;
  }
  assertNotNull(thrown);
  assertTrue(thrown instanceof IllegalArgumentException);

  thrown = null;
  try {
    acl.removeUser(" * ");
  } catch (Throwable t) {
    thrown = t;
  }
  assertNotNull(thrown);
  assertTrue(thrown instanceof IllegalArgumentException);

  thrown = null;
  try {
    acl.removeGroup(" * ");
  } catch (Throwable t) {
    thrown = t;
  }
  assertNotNull(thrown);
  assertTrue(thrown instanceof IllegalArgumentException);
}

Class: org.apache.hadoop.security.ssl.TestSSLFactory

TestCleaner TestInitializer HybridVerifier 
/**
 * Runs both before and after each test: locates the SSL configuration
 * directory on the classpath for this test class and removes any
 * generated keystores / SSL config files so each test starts and ends
 * with a clean slate.
 */
@After
@Before
public void cleanUp() throws Exception {
  sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
  KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
}

InternalCallVerifier NullVerifier ExceptionVerifier HybridVerifier 
/**
 * A CLIENT-mode SSLFactory can hand out client socket factories and a
 * hostname verifier, but requesting a *server* socket factory must throw
 * IllegalStateException (declared via the expected= attribute). The
 * factory is destroyed in finally regardless of outcome.
 */
@Test(expected = IllegalStateException.class)
public void clientMode() throws Exception {
  Configuration conf = createConfiguration(false, true);
  SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  try {
    factory.init();
    Assert.assertNotNull(factory.createSSLSocketFactory());
    Assert.assertNotNull(factory.getHostnameVerifier());
    factory.createSSLServerSocketFactory(); // must throw in client mode
  } finally {
    factory.destroy();
  }
}

IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * SSLFactory.configure() must install the configured hostname verifier
 * (STRICT_IE6) on an HttpsURLConnection: before configure() the
 * connection's verifier is something else, afterwards its toString()
 * reports STRICT_IE6. The URL is never actually connected.
 */
@Test
public void testConnectionConfigurator() throws Exception {
  Configuration conf = createConfiguration(false, true);
  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
  SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  try {
    factory.init();
    HttpsURLConnection conn =
        (HttpsURLConnection) new URL("https://foo").openConnection();
    Assert.assertNotSame("STRICT_IE6", conn.getHostnameVerifier().toString());
    factory.configure(conn);
    Assert.assertEquals("STRICT_IE6", conn.getHostnameVerifier().toString());
  } finally {
    factory.destroy();
  }
}

Class: org.apache.hadoop.security.token.delegation.TestDelegationToken

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates delegation tokens from many threads in parallel, then verifies
 * every cached token: its key must exist, its stored password must match a
 * freshly computed one, and verifyToken() must accept it. Fixes from the
 * original: the local class follows UpperCamelCase naming, the raw
 * Map/Iterator (which made the element assignments uncompilable without
 * generics) is replaced with a typed map and enhanced for-loop,
 * InterruptedException is re-interrupted instead of swallowed, and
 * assertNotNull replaces assertTrue(x != null).
 */
@Test
public void testParallelDelegationTokenCreation() throws Exception {
  final TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(2000, 24 * 60 * 60 * 1000,
          7 * 24 * 60 * 60 * 1000, 2000);
  try {
    dtSecretManager.startThreads();
    int numThreads = 100;
    final int numTokensPerThread = 100;
    /** Issues numTokensPerThread tokens, pausing briefly between each. */
    class TokenIssuer implements Runnable {
      @Override
      public void run() {
        for (int i = 0; i < numTokensPerThread; i++) {
          generateDelegationToken(dtSecretManager, "auser", "arenewer");
          try {
            Thread.sleep(250);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            return;
          }
        }
      }
    }
    Thread[] issuers = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      issuers[i] = new Daemon(new TokenIssuer());
      issuers[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
      issuers[i].join();
    }
    Map<TestDelegationTokenIdentifier, DelegationTokenInformation> tokenCache =
        dtSecretManager.getAllTokens();
    Assert.assertEquals(numTokensPerThread * numThreads, tokenCache.size());
    for (TestDelegationTokenIdentifier id : tokenCache.keySet()) {
      DelegationTokenInformation info = tokenCache.get(id);
      Assert.assertNotNull(info);
      DelegationKey key = dtSecretManager.getKey(id);
      Assert.assertNotNull(key);
      byte[] storedPassword = dtSecretManager.retrievePassword(id);
      byte[] password = dtSecretManager.createPassword(id, key);
      Assert.assertTrue(Arrays.equals(password, storedPassword));
      dtSecretManager.verifyToken(id, password);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * When the owner and the real user of a token identifier are the same
 * principal, getUser() must produce a plain TOKEN-authenticated UGI for
 * the owner, with no separate real user attached.
 */
@Test
public void testGetUserWithOwnerEqualsReal() {
  Text principal = new Text("owner");
  TestDelegationTokenIdentifier ident =
      new TestDelegationTokenIdentifier(principal, null, principal);
  UserGroupInformation ugi = ident.getUser();
  assertNull(ugi.getRealUser());
  assertEquals("owner", ugi.getUserName());
  assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Full token lifecycle against a secret manager configured with a short
 * renew interval (3s) and max lifetime: only the designated renewer
 * ("JobTracker") may renew; a renewed token's expiry lies in the future;
 * after the renew window lapses (sleep 2s) retrievePassword throws
 * InvalidToken until the token is renewed again; and once the max lifetime
 * is exceeded even the legitimate renewer's renewToken fails with
 * InvalidToken. Also checks the store hooks (isStoreNewTokenCalled /
 * isUpdateStoredTokenCalled) fire. Timing-sensitive by design; the empty
 * catch around the first post-expiry retrievePassword is the expected path.
 */
@Test public void testDelegationTokenSecretManager() throws Exception { final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,3 * 1000,1 * 1000,3600000); try { dtSecretManager.startThreads(); final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker"); Assert.assertTrue(dtSecretManager.isStoreNewTokenCalled); shouldThrow(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { dtSecretManager.renewToken(token,"FakeRenewer"); return null; } } ,AccessControlException.class); long time=dtSecretManager.renewToken(token,"JobTracker"); Assert.assertTrue(dtSecretManager.isUpdateStoredTokenCalled); assertTrue("renew time is in future",time > Time.now()); TestDelegationTokenIdentifier identifier=new TestDelegationTokenIdentifier(); byte[] tokenId=token.getIdentifier(); identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId))); Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier)); LOG.info("Sleep to expire the token"); Thread.sleep(2000); try { dtSecretManager.retrievePassword(identifier); Assert.fail("Token should have expired"); } catch ( InvalidToken e) { } dtSecretManager.renewToken(token,"JobTracker"); LOG.info("Sleep beyond the max lifetime"); Thread.sleep(2000); shouldThrow(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { dtSecretManager.renewToken(token,"JobTracker"); return null; } } ,InvalidToken.class); } finally { dtSecretManager.stopThreads(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Renewing a token that was created with a null renewer must fail with an
 * IOException. Fixes from the original: the secret manager's background
 * threads are now stopped in a finally block (the original leaked them on
 * every run), and assertNotNull replaces assertTrue(token != null).
 */
@Test
public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(24 * 60 * 60 * 1000, 10 * 1000,
          1 * 1000, 3600000);
  dtSecretManager.startThreads();
  try {
    TestDelegationTokenIdentifier dtId =
        new TestDelegationTokenIdentifier(new Text("theuser"), null, null);
    Token token = new Token(dtId, dtSecretManager);
    Assert.assertNotNull(token);
    try {
      dtSecretManager.renewToken(token, "");
      Assert.fail("Renewal must not succeed");
    } catch (IOException e) {
      // expected: a token without a renewer cannot be renewed
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With distinct owner and real user, getUser() must build a proxy UGI:
 * the outer user is the owner authenticated via PROXY, its real user is
 * the TOKEN-authenticated realUser, and the chain stops there (the real
 * user has no real user of its own).
 */
@Test
public void testGetUserWithOwnerAndReal() {
  Text ownerName = new Text("owner");
  Text realName = new Text("realUser");
  TestDelegationTokenIdentifier ident =
      new TestDelegationTokenIdentifier(ownerName, null, realName);
  UserGroupInformation ugi = ident.getUser();
  assertNotNull(ugi.getRealUser());
  assertNull(ugi.getRealUser().getRealUser()); // chain is one level deep
  assertEquals("owner", ugi.getUserName());
  assertEquals("realUser", ugi.getRealUser().getUserName());
  assertEquals(AuthenticationMethod.PROXY, ugi.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.TOKEN,
      ugi.getRealUser().getAuthenticationMethod());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With only an owner (no renewer, no real user), getUser() must yield a
 * TOKEN-authenticated UGI for the owner with no real user attached.
 */
@Test
public void testGetUserWithOwner() {
  TestDelegationTokenIdentifier ident =
      new TestDelegationTokenIdentifier(new Text("owner"), null, null);
  UserGroupInformation ugi = ident.getUser();
  assertNull(ugi.getRealUser());
  assertEquals("owner", ugi.getUserName());
  assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DelegationKey equality contract: keys built from identical id/date/bytes
 * must be equal AND produce equal hash codes (the hash assertion was
 * missing despite the test's name promising it); a key with a different id
 * must not be equal.
 */
@Test
public void testDelegationKeyEqualAndHash() {
  DelegationKey key1 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
  DelegationKey key2 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
  DelegationKey key3 = new DelegationKey(3333, 2222, "keyBytes".getBytes());
  Assert.assertEquals(key1, key2);
  // equals/hashCode contract: equal objects must hash equal.
  Assert.assertEquals(key1.hashCode(), key2.hashCode());
  Assert.assertFalse(key2.equals(key3));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rolling the master key must add at least one key, invoke the store hooks
 * (isStoreNewMasterKeyCalled, and eventually isRemoveStoredMasterKeyCalled,
 * which the trailing poll loop waits for), and keep previously issued token
 * passwords retrievable and byte-identical. Fixes from the original:
 * assertArrayEquals compares the password arrays by content — the original
 * assertEquals(Object,Object) on byte[] compares references — and
 * assertTrue replaces assertEquals(expr, true).
 */
@Test(timeout = 10000)
public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    Token token =
        generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
    int currNumKeys = dtSecretManager.getAllKeys().length;
    // Rolling must have added at least one key.
    Assert.assertTrue((currNumKeys - prevNumKeys) >= 1);
    ByteArrayInputStream bi = new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier =
        dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd = dtSecretManager.retrievePassword(identifier);
    // byte[] must be compared by content, not reference.
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // Wait for the expired-key removal hook; @Test timeout bounds this loop.
    while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}

Class: org.apache.hadoop.security.token.delegation.web.TestDelegationTokenManager

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * End-to-end DelegationTokenManager lifecycle: create a token for the
 * current user, verify it, renew it (new expiry must be in the future),
 * cancel it, and confirm the cancelled token fails verification with an
 * IOException (any other exception fails the test).
 */
@Test
public void testDTManager() throws Exception {
  DelegationTokenManager mgr = new DelegationTokenManager(new Text("foo"),
      DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
  mgr.init();
  Token token = mgr.createToken(UserGroupInformation.getCurrentUser(), "foo");
  Assert.assertNotNull(token);
  mgr.verifyToken(token);
  Assert.assertTrue(mgr.renewToken(token, "foo") > System.currentTimeMillis());
  mgr.cancelToken(token, "foo");
  try {
    mgr.verifyToken(token);
    Assert.fail();
  } catch (IOException ex) {
    // expected: cancelled tokens no longer verify
  } catch (Exception ex) {
    Assert.fail();
  }
  mgr.destroy();
}

Class: org.apache.hadoop.security.token.delegation.web.TestWebDelegationToken

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that DelegationTokenAuthenticationFilter picks up an externally
 * supplied secret manager via the DELEGATION_TOKEN_SECRET_MANAGER_ATTR
 * servlet-context attribute: tokens fetched through the filter must then
 * carry the external manager's kind ("fooKind" from
 * DummyDelegationTokenSecretManager) instead of the filter's default.
 * Jetty and the secret manager threads are torn down in finally.
 */
@Test public void testExternalDelegationTokenSecretManager() throws Exception { DummyDelegationTokenSecretManager secretMgr=new DummyDelegationTokenSecretManager(); final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { secretMgr.startThreads(); context.setAttribute(DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,secretMgr); jetty.start(); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); aUrl.getDelegationToken(authURL,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("fooKind"),token.getDelegationToken().getKind()); } finally { jetty.stop(); secretMgr.stopThreads(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the DelegationTokenAuthenticatedURL client operations against a
 * filtered Jetty endpoint: get/renew/cancel must succeed on the
 * authenticated URL ("authenticated=foo"); the same operations against the
 * unauthenticated URL must fail with 401; renewing with a different
 * authenticated identity ("authenticated=bar") must fail with 403.
 * A fresh delegation token is re-fetched before each negative case because
 * failures can clear the token from the Token holder.
 */
@Test public void testDelegationTokenAuthenticatorCalls() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { jetty.start(); URL nonAuthURL=new URL(getJettyURL() + "/foo/bar"); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); URL authURL2=new URL(getJettyURL() + "/foo/bar?authenticated=bar"); DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); try { aUrl.getDelegationToken(nonAuthURL,token,FOO_USER); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } aUrl.getDelegationToken(authURL,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind()); aUrl.renewDelegationToken(authURL,token); try { aUrl.renewDelegationToken(nonAuthURL,token); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } aUrl.getDelegationToken(authURL,token,FOO_USER); try { aUrl.renewDelegationToken(authURL2,token); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("403")); } aUrl.getDelegationToken(authURL,token,FOO_USER); aUrl.cancelDelegationToken(authURL,token); aUrl.getDelegationToken(authURL,token,FOO_USER); aUrl.cancelDelegationToken(nonAuthURL,token); aUrl.getDelegationToken(authURL,token,FOO_USER); try { aUrl.renewDelegationToken(nonAuthURL,token); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } } finally { jetty.stop(); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Drives the delegation-token REST protocol with raw HttpURLConnection
 * calls (no client helper): GETDELEGATIONTOKEN requires authentication
 * (401 otherwise) and returns JSON with Token.urlString; a request carrying
 * ?delegation=<token> is accepted even on the otherwise unauthenticated
 * URL; RENEWDELEGATIONTOKEN (HTTP PUT) requires auth (401) and the right
 * renewer (403 for "authenticated=bar"); CANCELDELEGATIONTOKEN works
 * unauthenticated, and cancelling the same token twice yields 404. A second
 * token is then fetched and cancelled through the authenticated URL.
 */
@Test public void testRawHttpCalls() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { jetty.start(); URL nonAuthURL=new URL(getJettyURL() + "/foo/bar"); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); HttpURLConnection conn=(HttpURLConnection)nonAuthURL.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); conn=(HttpURLConnection)authURL.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); URL url=new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); ObjectMapper mapper=new ObjectMapper(); Map map=mapper.readValue(conn.getInputStream(),Map.class); String dt=(String)((Map)map.get("Token")).get("urlString"); Assert.assertNotNull(dt); url=new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&delegation=" + dt); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=RENEWDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=RENEWDELEGATIONTOKEN&token=" + dt); 
conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(getJettyURL() + "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); mapper=new ObjectMapper(); map=mapper.readValue(conn.getInputStream(),Map.class); dt=(String)((Map)map.get("Token")).get("urlString"); Assert.assertNotNull(dt); url=new URL(authURL.toExternalForm() + "&op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); } finally { jetty.stop(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test public void testKerberosDelegationTokenAuthenticator() throws Exception { org.apache.hadoop.conf.Configuration conf=new org.apache.hadoop.conf.Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); File testDir=new File("target/" + UUID.randomUUID().toString()); Assert.assertTrue(testDir.mkdirs()); MiniKdc kdc=new MiniKdc(MiniKdc.createConf(),testDir); final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(KDTAFilter.class),"/*",0); context.addServlet(new ServletHolder(UserServlet.class),"/bar"); try { kdc.start(); File keytabFile=new File(testDir,"test.keytab"); kdc.createPrincipal(keytabFile,"client","HTTP/localhost"); KDTAFilter.keytabFile=keytabFile.getAbsolutePath(); jetty.start(); final DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); final DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); final URL url=new URL(getJettyURL() + "/foo/bar"); try { aUrl.getDelegationToken(url,token,FOO_USER); Assert.fail(); } catch ( AuthenticationException ex) { Assert.assertTrue(ex.getMessage().contains("GSSException")); } doAsKerberosUser("client",keytabFile.getAbsolutePath(),new Callable(){ @Override public Void call() throws Exception { aUrl.getDelegationToken(url,token,"client"); Assert.assertNotNull(token.getDelegationToken()); aUrl.renewDelegationToken(url,token); Assert.assertNotNull(token.getDelegationToken()); aUrl.getDelegationToken(url,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); try { aUrl.renewDelegationToken(url,token); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("403")); } aUrl.getDelegationToken(url,token,FOO_USER); aUrl.cancelDelegationToken(url,token); Assert.assertNull(token.getDelegationToken()); return null; } } ); } finally { jetty.stop(); kdc.stop(); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testFallbackToPseudoDelegationTokenAuthenticator() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0); context.addServlet(new ServletHolder(UserServlet.class),"/bar"); try { jetty.start(); final URL url=new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); HttpURLConnection conn=aUrl.openConnection(url,token); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); List ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals(FOO_USER,ret.get(0)); aUrl.getDelegationToken(url,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind()); return null; } } ); } finally { jetty.stop(); } }

Class: org.apache.hadoop.service.TestCompositeService

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=1000) public void testAddUninitedSiblingInInit() throws Throwable { CompositeService parent=new CompositeService("parent"); BreakableService sibling=new BreakableService(); parent.addService(new AddSiblingService(parent,sibling,STATE.INITED)); parent.init(new Configuration()); try { parent.start(); fail("Expected an exception, got " + parent); } catch ( ServiceStateException e) { } parent.stop(); assertEquals("Incorrect number of services",2,parent.getServices().size()); }

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testServiceStartup(){ ServiceManager serviceManager=new ServiceManager("ServiceManager"); for (int i=0; i < NUM_OF_SERVICES; i++) { CompositeServiceImpl service=new CompositeServiceImpl(i); if (i == FAILED_SERVICE_SEQ_NUMBER) { service.setThrowExceptionOnStart(true); } serviceManager.addTestService(service); } CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]); Configuration conf=new Configuration(); serviceManager.init(conf); try { serviceManager.start(); fail("Exception should have been thrown due to startup failure of last service"); } catch ( ServiceTestRuntimeException e) { for (int i=0; i < NUM_OF_SERVICES - 1; i++) { if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) { assertEquals("Service state should have been ",STATE.INITED,services[NUM_OF_SERVICES - 1].getServiceState()); } else { assertEquals("Service state should have been ",STATE.STOPPED,services[i].getServiceState()); } } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRemoveService(){ CompositeService testService=new CompositeService("TestService"){ @Override public void serviceInit( Configuration conf){ Integer notAService=new Integer(0); assertFalse("Added an integer as a service",addIfService(notAService)); Service service1=new AbstractService("Service1"){ } ; addIfService(service1); Service service2=new AbstractService("Service2"){ } ; addIfService(service2); Service service3=new AbstractService("Service3"){ } ; addIfService(service3); removeService(service1); } } ; testService.init(new Configuration()); assertEquals("Incorrect number of services",2,testService.getServices().size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=1000) public void testAddIfService(){ CompositeService testService=new CompositeService("TestService"){ Service service; @Override public void serviceInit( Configuration conf){ Integer notAService=new Integer(0); assertFalse("Added an integer as a service",addIfService(notAService)); service=new AbstractService("Service"){ } ; assertTrue("Unable to add a service",addIfService(service)); } } ; testService.init(new Configuration()); assertEquals("Incorrect number of services",1,testService.getServices().size()); }

Class: org.apache.hadoop.service.TestGlobalStateChangeListener

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** * Test that the {@link BreakableStateChangeListener} is picking up * the state changes and that its last event field is as expected. */ @Test public void testEventHistory(){ register(); BreakableService service=new BreakableService(); assertListenerState(listener,Service.STATE.NOTINITED); assertEquals(0,listener.getEventCount()); service.init(new Configuration()); assertListenerState(listener,Service.STATE.INITED); assertSame(service,listener.getLastService()); assertListenerEventCount(listener,1); service.start(); assertListenerState(listener,Service.STATE.STARTED); assertListenerEventCount(listener,2); service.stop(); assertListenerState(listener,Service.STATE.STOPPED); assertListenerEventCount(listener,3); }

Class: org.apache.hadoop.service.TestServiceLifecycle

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Show that if the service failed during an init * operation, stop was called. */ @Test public void testStopFailingInitAndStop() throws Throwable { BreakableService svc=new BreakableService(true,false,true); svc.registerServiceListener(new LoggingStateChangeListener()); try { svc.init(new Configuration()); fail("Expected a failure, got " + svc); } catch ( BreakableService.BrokenLifecycleEvent e) { assertEquals(Service.STATE.INITED,e.state); } assertServiceStateStopped(svc); assertEquals(Service.STATE.INITED,svc.getFailureState()); Throwable failureCause=svc.getFailureCause(); assertNotNull("Null failure cause in " + svc,failureCause); BreakableService.BrokenLifecycleEvent cause=(BreakableService.BrokenLifecycleEvent)failureCause; assertNotNull("null state in " + cause + " raised by "+ svc,cause.state); assertEquals(Service.STATE.INITED,cause.state); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * This test verifies that you can block waiting for something to happen * and use notifications to manage it * @throws Throwable on a failure */ @Test public void testListenerWithNotifications() throws Throwable { AsyncSelfTerminatingService service=new AsyncSelfTerminatingService(2000); NotifyingListener listener=new NotifyingListener(); service.registerServiceListener(listener); service.init(new Configuration()); service.start(); assertServiceInState(service,Service.STATE.STARTED); long start=System.currentTimeMillis(); synchronized (listener) { listener.wait(20000); } long duration=System.currentTimeMillis() - start; assertEquals(Service.STATE.STOPPED,listener.notifyingState); assertServiceInState(service,Service.STATE.STOPPED); assertTrue("Duration of " + duration + " too long",duration < 10000); }

Class: org.apache.hadoop.streaming.TestAutoInputFormat

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings({"unchecked","deprecation"}) @Test public void testFormat() throws IOException { JobConf job=new JobConf(conf); FileSystem fs=FileSystem.getLocal(conf); Path dir=new Path(System.getProperty("test.build.data",".") + "/mapred"); Path txtFile=new Path(dir,"auto.txt"); Path seqFile=new Path(dir,"auto.seq"); fs.delete(dir,true); FileInputFormat.setInputPaths(job,dir); Writer txtWriter=new OutputStreamWriter(fs.create(txtFile)); try { for (int i=0; i < LINES_COUNT; i++) { txtWriter.write("" + (10 * i)); txtWriter.write("\n"); } } finally { txtWriter.close(); } SequenceFile.Writer seqWriter=SequenceFile.createWriter(fs,conf,seqFile,IntWritable.class,LongWritable.class); try { for (int i=0; i < RECORDS_COUNT; i++) { IntWritable key=new IntWritable(11 * i); LongWritable value=new LongWritable(12 * i); seqWriter.append(key,value); } } finally { seqWriter.close(); } AutoInputFormat format=new AutoInputFormat(); InputSplit[] splits=format.getSplits(job,SPLITS_COUNT); for ( InputSplit split : splits) { RecordReader reader=format.getRecordReader(split,job,Reporter.NULL); Object key=reader.createKey(); Object value=reader.createValue(); try { while (reader.next(key,value)) { if (key instanceof LongWritable) { assertEquals("Wrong value class.",Text.class,value.getClass()); assertTrue("Invalid value",Integer.parseInt(((Text)value).toString()) % 10 == 0); } else { assertEquals("Wrong key class.",IntWritable.class,key.getClass()); assertEquals("Wrong value class.",LongWritable.class,value.getClass()); assertTrue("Invalid key.",((IntWritable)key).get() % 11 == 0); assertTrue("Invalid value.",((LongWritable)value).get() % 12 == 0); } } } finally { reader.close(); } } }

Class: org.apache.hadoop.streaming.TestDumpTypedBytes

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDumping() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); PrintStream psBackup=System.out; ByteArrayOutputStream out=new ByteArrayOutputStream(); PrintStream psOut=new PrintStream(out); System.setOut(psOut); DumpTypedBytes dumptb=new DumpTypedBytes(conf); try { Path root=new Path("/typedbytestest"); assertTrue(fs.mkdirs(root)); assertTrue(fs.exists(root)); OutputStreamWriter writer=new OutputStreamWriter(fs.create(new Path(root,"test.txt"))); try { for (int i=0; i < 100; i++) { writer.write("" + (10 * i) + "\n"); } } finally { writer.close(); } String[] args=new String[1]; args[0]="/typedbytestest"; int ret=dumptb.run(args); assertEquals("Return value != 0.",0,ret); ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray()); TypedBytesInput tbinput=new TypedBytesInput(new DataInputStream(in)); int counter=0; Object key=tbinput.read(); while (key != null) { assertEquals(Long.class,key.getClass()); Object value=tbinput.read(); assertEquals(String.class,value.getClass()); assertTrue("Invalid output.",Integer.parseInt(value.toString()) % 10 == 0); counter++; key=tbinput.read(); } assertEquals("Wrong number of outputs.",100,counter); } finally { try { fs.close(); } catch ( Exception e) { } System.setOut(psBackup); cluster.shutdown(); } }

Class: org.apache.hadoop.streaming.TestLoadTypedBytes

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testLoading() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); ByteArrayOutputStream out=new ByteArrayOutputStream(); TypedBytesOutput tboutput=new TypedBytesOutput(new DataOutputStream(out)); for (int i=0; i < 100; i++) { tboutput.write(new Long(i)); tboutput.write("" + (10 * i)); } InputStream isBackup=System.in; ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray()); System.setIn(in); LoadTypedBytes loadtb=new LoadTypedBytes(conf); try { Path root=new Path("/typedbytestest"); assertTrue(fs.mkdirs(root)); assertTrue(fs.exists(root)); String[] args=new String[1]; args[0]="/typedbytestest/test.seq"; int ret=loadtb.run(args); assertEquals("Return value != 0.",0,ret); Path file=new Path(root,"test.seq"); assertTrue(fs.exists(file)); SequenceFile.Reader reader=new SequenceFile.Reader(fs,file,conf); int counter=0; TypedBytesWritable key=new TypedBytesWritable(); TypedBytesWritable value=new TypedBytesWritable(); while (reader.next(key,value)) { assertEquals(Long.class,key.getValue().getClass()); assertEquals(String.class,value.getValue().getClass()); assertTrue("Invalid record.",Integer.parseInt(value.toString()) % 10 == 0); counter++; } assertEquals("Wrong number of records.",100,counter); } finally { try { fs.close(); } catch ( Exception e) { } System.setIn(isBackup); cluster.shutdown(); } }

Class: org.apache.hadoop.streaming.TestStreaming

TestInitializer BooleanVerifier HybridVerifier 
@Before public void setUp() throws IOException { UtilTest.recursiveDelete(TEST_DIR); assertTrue("Creating " + TEST_DIR,TEST_DIR.mkdirs()); args.clear(); }

Class: org.apache.hadoop.streaming.TestStreamingBackground

TestInitializer BooleanVerifier HybridVerifier 
@Before public void setUp() throws IOException { UtilTest.recursiveDelete(TEST_DIR); assertTrue(TEST_DIR.mkdirs()); FileOutputStream out=new FileOutputStream(INPUT_FILE.getAbsoluteFile()); out.write("hello\n".getBytes()); out.close(); }

Class: org.apache.hadoop.streaming.TestStreamingExitStatus

TestInitializer BooleanVerifier HybridVerifier 
@Before public void setUp() throws IOException { UtilTest.recursiveDelete(TEST_DIR); assertTrue(TEST_DIR.mkdirs()); FileOutputStream out=new FileOutputStream(INPUT_FILE.getAbsoluteFile()); out.write("hello\n".getBytes()); out.close(); }

Class: org.apache.hadoop.streaming.TestTypedBytesStreaming

TestCleaner TestInitializer HybridVerifier 
@Before @After public void cleanupOutput() throws Exception { FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile()); INPUT_FILE.delete(); createInput(); }

Class: org.apache.hadoop.test.TestMultithreadedTestUtil

BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testNoErrors() throws Exception { final AtomicInteger threadsRun=new AtomicInteger(); TestContext ctx=new TestContext(); for (int i=0; i < 3; i++) { ctx.addThread(new TestingThread(ctx){ @Override public void doWork() throws Exception { threadsRun.incrementAndGet(); } } ); } assertEquals(0,threadsRun.get()); ctx.startThreads(); long st=Time.now(); ctx.waitFor(30000); long et=Time.now(); assertEquals(3,threadsRun.get()); assertTrue("Test took " + (et - st) + "ms",et - st < 5000); }

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testThreadThrowsCheckedException() throws Exception { TestContext ctx=new TestContext(); ctx.addThread(new TestingThread(ctx){ @Override public void doWork() throws Exception { throw new IOException("my ioe"); } } ); ctx.startThreads(); long st=Time.now(); try { ctx.waitFor(30000); fail("waitFor did not throw"); } catch ( RuntimeException rte) { assertEquals("my ioe",rte.getCause().getMessage()); } long et=Time.now(); assertTrue("Test took " + (et - st) + "ms",et - st < 5000); }

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testThreadFails() throws Exception { TestContext ctx=new TestContext(); ctx.addThread(new TestingThread(ctx){ @Override public void doWork() throws Exception { fail(FAIL_MSG); } } ); ctx.startThreads(); long st=Time.now(); try { ctx.waitFor(30000); fail("waitFor did not throw"); } catch ( RuntimeException rte) { assertEquals(FAIL_MSG,rte.getCause().getMessage()); } long et=Time.now(); assertTrue("Test took " + (et - st) + "ms",et - st < 5000); }

Class: org.apache.hadoop.test.TestTimedOutTestsListener

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=500) public void testThreadDumpAndDeadlocks() throws Exception { new Deadlock(); String s=null; while (true) { s=TimedOutTestsListener.buildDeadlockInfo(); if (s != null) break; Thread.sleep(100); } Assert.assertEquals(3,countStringOccurrences(s,"BLOCKED")); Failure failure=new Failure(null,new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX)); StringWriter writer=new StringWriter(); new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure); String out=writer.toString(); Assert.assertTrue(out.contains("THREAD DUMP")); Assert.assertTrue(out.contains("DEADLOCKS DETECTED")); System.out.println(out); }

Class: org.apache.hadoop.tools.TestCopyListing

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=10000) public void testBuildListing(){ FileSystem fs=null; try { fs=FileSystem.get(getConf()); List srcPaths=new ArrayList(); Path p1=new Path("/tmp/in/1"); Path p2=new Path("/tmp/in/2"); Path p3=new Path("/tmp/in2/2"); Path target=new Path("/tmp/out/1"); srcPaths.add(p1.getParent()); srcPaths.add(p3.getParent()); TestDistCpUtils.createFile(fs,"/tmp/in/1"); TestDistCpUtils.createFile(fs,"/tmp/in/2"); TestDistCpUtils.createFile(fs,"/tmp/in2/2"); fs.mkdirs(target); OutputStream out=fs.create(p1); out.write("ABC".getBytes()); out.close(); out=fs.create(p2); out.write("DEF".getBytes()); out.close(); out=fs.create(p3); out.write("GHIJ".getBytes()); out.close(); Path listingFile=new Path("/tmp/file"); DistCpOptions options=new DistCpOptions(srcPaths,target); options.setSyncFolder(true); CopyListing listing=new SimpleCopyListing(getConf(),CREDENTIALS); try { listing.buildListing(listingFile,options); Assert.fail("Duplicates not detected"); } catch ( DuplicateFileException ignore) { } Assert.assertEquals(listing.getBytesToCopy(),10); Assert.assertEquals(listing.getNumberOfPaths(),3); TestDistCpUtils.delete(fs,"/tmp"); try { listing.buildListing(listingFile,options); Assert.fail("Invalid input not detected"); } catch ( InvalidInputException ignore) { } TestDistCpUtils.delete(fs,"/tmp"); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Test build listing failed"); } finally { TestDistCpUtils.delete(fs,"/tmp"); } }

NullVerifier EqualityVerifier HybridVerifier 
@Test public void testFailOnCloseError() throws IOException { File inFile=File.createTempFile("TestCopyListingIn",null); inFile.deleteOnExit(); File outFile=File.createTempFile("TestCopyListingOut",null); outFile.deleteOnExit(); List srcs=new ArrayList(); srcs.add(new Path(inFile.toURI())); Exception expectedEx=new IOException("boom"); SequenceFile.Writer writer=mock(SequenceFile.Writer.class); doThrow(expectedEx).when(writer).close(); SimpleCopyListing listing=new SimpleCopyListing(getConf(),CREDENTIALS); DistCpOptions options=new DistCpOptions(srcs,new Path(outFile.toURI())); Exception actualEx=null; try { listing.doBuildListing(writer,options); } catch ( Exception e) { actualEx=e; } Assert.assertNotNull("close writer didn't fail",actualEx); Assert.assertEquals(expectedEx,actualEx); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=10000) public void testSkipCopy() throws Exception { SimpleCopyListing listing=new SimpleCopyListing(getConf(),CREDENTIALS){ @Override protected boolean shouldCopy( Path path, DistCpOptions options){ return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME); } } ; FileSystem fs=FileSystem.get(getConf()); List srcPaths=new ArrayList(); srcPaths.add(new Path("/tmp/in4/1")); srcPaths.add(new Path("/tmp/in4/2")); Path target=new Path("/tmp/out4/1"); TestDistCpUtils.createFile(fs,"/tmp/in4/1/_SUCCESS"); TestDistCpUtils.createFile(fs,"/tmp/in4/1/file"); TestDistCpUtils.createFile(fs,"/tmp/in4/2"); fs.mkdirs(target); DistCpOptions options=new DistCpOptions(srcPaths,target); Path listingFile=new Path("/tmp/list4"); listing.buildListing(listingFile,options); Assert.assertEquals(listing.getNumberOfPaths(),3); SequenceFile.Reader reader=new SequenceFile.Reader(getConf(),SequenceFile.Reader.file(listingFile)); CopyListingFileStatus fileStatus=new CopyListingFileStatus(); Text relativePath=new Text(); Assert.assertTrue(reader.next(relativePath,fileStatus)); Assert.assertEquals(relativePath.toString(),"/1"); Assert.assertTrue(reader.next(relativePath,fileStatus)); Assert.assertEquals(relativePath.toString(),"/1/file"); Assert.assertTrue(reader.next(relativePath,fileStatus)); Assert.assertEquals(relativePath.toString(),"/2"); Assert.assertFalse(reader.next(relativePath,fileStatus)); }

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
@Test(timeout=10000) public void testBuildListingForSingleFile(){ FileSystem fs=null; String testRootString="/singleFileListing"; Path testRoot=new Path(testRootString); SequenceFile.Reader reader=null; try { fs=FileSystem.get(getConf()); if (fs.exists(testRoot)) TestDistCpUtils.delete(fs,testRootString); Path sourceFile=new Path(testRoot,"/source/foo/bar/source.txt"); Path decoyFile=new Path(testRoot,"/target/moo/source.txt"); Path targetFile=new Path(testRoot,"/target/moo/target.txt"); TestDistCpUtils.createFile(fs,sourceFile.toString()); TestDistCpUtils.createFile(fs,decoyFile.toString()); TestDistCpUtils.createFile(fs,targetFile.toString()); List srcPaths=new ArrayList(); srcPaths.add(sourceFile); DistCpOptions options=new DistCpOptions(srcPaths,targetFile); CopyListing listing=new SimpleCopyListing(getConf(),CREDENTIALS); final Path listFile=new Path(testRoot,"/tmp/fileList.seq"); listing.buildListing(listFile,options); reader=new SequenceFile.Reader(getConf(),SequenceFile.Reader.file(listFile)); CopyListingFileStatus fileStatus=new CopyListingFileStatus(); Text relativePath=new Text(); Assert.assertTrue(reader.next(relativePath,fileStatus)); Assert.assertTrue(relativePath.toString().equals("")); } catch ( Exception e) { Assert.fail("Unexpected exception encountered."); LOG.error("Unexpected exception: ",e); } finally { TestDistCpUtils.delete(fs,testRootString); IOUtils.closeStream(reader); } }

Class: org.apache.hadoop.tools.TestExternalCall

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test main method of DistCp. Method should to call System.exit(). */ @Test public void testCleanupTestViaToolRunner() throws IOException, InterruptedException { Configuration conf=getConf(); Path stagingDir=JobSubmissionFiles.getStagingDir(new Cluster(conf),conf); stagingDir.getFileSystem(conf).mkdirs(stagingDir); Path soure=createFile("tmp.txt"); Path target=createFile("target.txt"); try { String[] arg={target.toString(),soure.toString()}; DistCp.main(arg); Assert.fail(); } catch ( ExitException t) { Assert.assertTrue(fs.exists(target)); Assert.assertEquals(t.status,0); Assert.assertEquals(stagingDir.getFileSystem(conf).listStatus(stagingDir).length,0); } }

Class: org.apache.hadoop.tools.TestHadoopArchives

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testCopyToLocal() throws Exception { final String fullHarPathStr=makeArchive(); final String tmpDir=System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp"; final Path tmpPath=new Path(tmpDir); final LocalFileSystem localFs=FileSystem.getLocal(new Configuration()); localFs.delete(tmpPath,true); localFs.mkdirs(tmpPath); assertTrue(localFs.exists(tmpPath)); final HarFileSystem harFileSystem=new HarFileSystem(fs); try { final URI harUri=new URI(fullHarPathStr); harFileSystem.initialize(harUri,fs.getConf()); final Path sourcePath=new Path(fullHarPathStr + Path.SEPARATOR + "a"); final Path targetPath=new Path(tmpPath,"straus"); harFileSystem.copyToLocalFile(false,sourcePath,targetPath); FileStatus straus=localFs.getFileStatus(targetPath); assertEquals(1,straus.getLen()); } finally { harFileSystem.close(); localFs.delete(tmpPath,true); } }

Class: org.apache.hadoop.tools.TestIntegration

UtilityVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test(timeout=100000) public void testOverwrite(){ byte[] contents1="contents1".getBytes(); byte[] contents2="contents2".getBytes(); Assert.assertEquals(contents1.length,contents2.length); try { addEntries(listFile,"srcdir"); createWithContents("srcdir/file1",contents1); createWithContents("dstdir/file1",contents2); Path target=new Path(root + "/dstdir"); runTest(listFile,target,false,false,false,true); checkResult(target,1,"file1"); FSDataInputStream is=fs.open(new Path(root + "/dstdir/file1")); byte[] dstContents=new byte[contents1.length]; is.readFully(dstContents); is.close(); Assert.assertArrayEquals(contents1,dstContents); } catch ( IOException e) { LOG.error("Exception encountered while running distcp",e); Assert.fail("distcp failure"); } finally { TestDistCpUtils.delete(fs,root); TestDistCpUtils.delete(fs,"target/tmp1"); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=100000) public void testCleanup(){ try { Path sourcePath=new Path("noscheme:///file"); List sources=new ArrayList(); sources.add(sourcePath); DistCpOptions options=new DistCpOptions(sources,target); Configuration conf=getConf(); Path stagingDir=JobSubmissionFiles.getStagingDir(new Cluster(conf),conf); stagingDir.getFileSystem(conf).mkdirs(stagingDir); try { new DistCp(conf,options).execute(); } catch ( Throwable t) { Assert.assertEquals(stagingDir.getFileSystem(conf).listStatus(stagingDir).length,0); } } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("testCleanup failed " + e.getMessage()); } }

Class: org.apache.hadoop.tools.TestJMXGet

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test JMX connection to DataNode.. * @throws Exception */ @Test public void testDataNode() throws Exception { int numDatanodes=2; cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test"),fileSize,fileSize,blockSize,(short)2,seed); JMXGet jmx=new JMXGet(); String serviceName="DataNode"; jmx.setService(serviceName); jmx.init(); assertEquals(fileSize,Integer.parseInt(jmx.getValue("BytesWritten"))); cluster.shutdown(); MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer(); ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*"); Set names=mbsc.queryNames(query,null); assertTrue("No beans should be registered for " + serviceName,names.isEmpty()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test JMX connection to NameNode.. * @throws Exception */ @Test public void testNameNode() throws Exception { int numDatanodes=2; cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test1"),fileSize,fileSize,blockSize,(short)2,seed); JMXGet jmx=new JMXGet(); String serviceName="NameNode"; jmx.setService(serviceName); jmx.init(); assertTrue("error printAllValues",checkPrintAllValues(jmx)); assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumLiveDataNodes"))); assertGauge("CorruptBlocks",Long.parseLong(jmx.getValue("CorruptBlocks")),getMetrics("FSNamesystem")); assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumOpenConnections"))); cluster.shutdown(); MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer(); ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*"); Set names=mbsc.queryNames(query,null); assertTrue("No beans should be registered for " + serviceName,names.isEmpty()); }

Class: org.apache.hadoop.tools.TestOptionsParser

NullVerifier EqualityVerifier HybridVerifier 
@Test public void testLogPath(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertNull(options.getLogPath()); options=OptionsParser.parse(new String[]{"-log","hdfs://localhost:8020/logs","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getLogPath(),new Path("hdfs://localhost:8020/logs")); }

UtilityVerifier BooleanVerifier HybridVerifier 
@Test public void testParseAtomicCommit(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldAtomicCommit()); options=OptionsParser.parse(new String[]{"-atomic","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldAtomicCommit()); try { OptionsParser.parse(new String[]{"-atomic","-update","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.fail("Atomic and sync folders were allowed"); } catch ( IllegalArgumentException ignore) { } }

UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testParseWorkPath(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertNull(options.getAtomicWorkPath()); options=OptionsParser.parse(new String[]{"-atomic","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertNull(options.getAtomicWorkPath()); options=OptionsParser.parse(new String[]{"-atomic","-tmp","hdfs://localhost:8020/work","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getAtomicWorkPath(),new Path("hdfs://localhost:8020/work")); try { OptionsParser.parse(new String[]{"-tmp","hdfs://localhost:8020/work","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.fail("work path was allowed without -atomic switch"); } catch ( IllegalArgumentException ignore) { } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies parsing of the -m (max maps) option: the default map count when the
 * switch is absent, an explicit value, clamping of 0 up to 1, and rejection of
 * a non-numeric value as well as an unknown switch.
 */
@Test
public void testParseMaps() {
  DistCpOptions options = OptionsParser.parse(new String[]{
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  // No -m switch: parser falls back to the configured default.
  Assert.assertEquals(DistCpConstants.DEFAULT_MAPS, options.getMaxMaps());
  options = OptionsParser.parse(new String[]{
      "-m", "1",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertEquals(1, options.getMaxMaps());
  // A non-positive map count is clamped up to 1.
  options = OptionsParser.parse(new String[]{
      "-m", "0",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertEquals(1, options.getMaxMaps());
  try {
    OptionsParser.parse(new String[]{
        "-m", "hello",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    Assert.fail("Non numeric map count was parsed");
  } catch (IllegalArgumentException ignore) {
  }
  try {
    OptionsParser.parse(new String[]{
        "-mapredXslConf",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    Assert.fail("Unknown option was parsed");
  } catch (IllegalArgumentException ignore) {
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** Verifies -overwrite parsing: off by default, on when given, and rejected in combination with -update. */
@Test public void testParseOverwrite(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldOverwrite()); options=OptionsParser.parse(new String[]{"-overwrite","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldOverwrite()); try { OptionsParser.parse(new String[]{"-update","-overwrite","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.fail("Update and overwrite aren't allowed together"); } catch ( IllegalArgumentException ignore) { } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Verifies -delete parsing: off by default, allowed with -update or -overwrite, and rejected with -atomic. */
@Test public void testParseDeleteMissing(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldDeleteMissing()); options=OptionsParser.parse(new String[]{"-update","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldSyncFolder()); Assert.assertTrue(options.shouldDeleteMissing()); options=OptionsParser.parse(new String[]{"-overwrite","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldOverwrite()); Assert.assertTrue(options.shouldDeleteMissing()); try { OptionsParser.parse(new String[]{"-atomic","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.fail("Atomic and delete folders were allowed"); } catch ( IllegalArgumentException ignore) { } }

IdentityVerifier EqualityVerifier HybridVerifier 
/** Pins the exact DistCpOptions.toString() format and checks that DistCpOptionSwitch.toString() differs from its enum name. NOTE(review): this test is brittle — any new option field changes the expected string. */
@Test public void testToString(){ DistCpOptions option=new DistCpOptions(new Path("abc"),new Path("xyz")); String val="DistCpOptions{atomicCommit=false, syncFolder=false, deleteMissing=false, " + "ignoreFailures=false, maxMaps=20, sslConfigurationFile='null', copyStrategy='uniformsize', " + "sourceFileListing=abc, sourcePaths=null, targetPath=xyz, targetPathExists=true, "+ "preserveRawXattrs=false}"; Assert.assertEquals(val,option.toString()); Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),DistCpOptionSwitch.ATOMIC_COMMIT.name()); }

NullVerifier EqualityVerifier HybridVerifier 
/** Verifies -mapredSslConf parsing: null by default, set to the given file path when supplied. */
@Test public void testParseSSLConf(){ DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertNull(options.getSslConfigurationFile()); options=OptionsParser.parse(new String[]{"-mapredSslConf","/tmp/ssl-client.xml","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getSslConfigurationFile(),"/tmp/ssl-client.xml"); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exhaustively verifies -p (preserve) parsing: no attributes preserved by default;
 * bare -p preserves block size, replication, permission, user, group and checksum
 * type (but not ACLs/XAttrs); each letter in e.g. -pbr / -pbrgup / -pbrgupcax / -pc
 * toggles exactly its attribute; the attribute iterator for bare -p yields 6 entries;
 * an invalid letter (-pabcd) is rejected; and preserve(PERMISSION) is idempotent.
 */
@Test public void testPreserve(){ DistCpOptions options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-p","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new 
String[]{"-pbr","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-pbrgup","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-pbrgupcax","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL)); Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new 
String[]{"-pc","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); int i=0; Iterator attribIterator=options.preserveAttributes(); while (attribIterator.hasNext()) { attribIterator.next(); i++; } Assert.assertEquals(i,6); try { OptionsParser.parse(new String[]{"-pabcd","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target"}); Assert.fail("Invalid preserve attribute"); } catch ( IllegalArgumentException ignore) { } catch ( NoSuchElementException ignore) { } options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); options.preserve(FileAttribute.PERMISSION); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); options.preserve(FileAttribute.PERMISSION); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verifies DistCpOptions.appendToConf: -atomic/-i set their boolean labels and the default bandwidth, and -update/-delete/-pu/-bandwidth set sync, delete, preserve-status ("U") and a custom bandwidth value. */
@Test public void testOptionsAppendToConf(){ Configuration conf=new Configuration(); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),false)); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(),false)); DistCpOptions options=OptionsParser.parse(new String[]{"-atomic","-i","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),false)); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(),false)); Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(),-1),DistCpConstants.DEFAULT_BANDWIDTH_MB); conf=new Configuration(); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false)); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(),false)); Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()),null); options=OptionsParser.parse(new String[]{"-update","-delete","-pu","-bandwidth","11","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false)); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(),false)); Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()),"U"); Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(),-1),11); }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** Verifies DistCpOptionSwitch.addToConf: the atomic-commit label is unset on a fresh Configuration and true after addToConf. */
@Test public void testOptionsSwitchAddToConf(){ Configuration conf=new Configuration(); Assert.assertNull(conf.get(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel())); DistCpOptionSwitch.addToConf(conf,DistCpOptionSwitch.ATOMIC_COMMIT); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(),false)); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Verifies -append parsing: valid only together with -update, sets both APPEND and SYNC_FOLDERS labels, and is rejected alone or combined with -skipcrccheck. */
@Test public void testAppendOption(){ Configuration conf=new Configuration(); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(),false)); Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false)); DistCpOptions options=OptionsParser.parse(new String[]{"-update","-append","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(),false)); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),false)); try { options=OptionsParser.parse(new String[]{"-append","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); fail("Append should fail if update option is not specified"); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("Append is valid only with update options",e); } try { options=OptionsParser.parse(new String[]{"-append","-update","-skipcrccheck","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); fail("Append should fail if skipCrc option is specified"); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("Append is disallowed when skipping CRC",e); } }

Class: org.apache.hadoop.tools.TestTools

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies DFSAdmin usage handling: every admin sub-command invoked with
 * invalid arguments returns the error exit code (-1), -help succeeds even
 * with a bogus trailing argument, and the cancel/renew delegation-token
 * commands print the usage banner to stderr.
 */
@Test
public void testDFSAdminInvalidUsageHelp() {
  ImmutableSet<String> args = ImmutableSet.of("-report", "-saveNamespace",
      "-rollEdits", "-restoreFailedStorage", "-refreshNodes",
      "-finalizeUpgrade", "-metasave", "-refreshUserToGroupsMappings",
      "-printTopology", "-refreshNamenodes", "-deleteBlockPool",
      "-setBalancerBandwidth", "-fetchImage");
  try {
    for (String arg : args) {
      // Invalid usage of each switch must yield the error exit code.
      assertEquals(-1, ToolRunner.run(new DFSAdmin(), fillArgs(arg)));
    }
    // -help always exits successfully.
    assertEquals(0,
        ToolRunner.run(new DFSAdmin(), new String[]{"-help", "-some"}));
  } catch (Exception e) {
    // Separator added so the exception text is readable in the report.
    fail("testDFSAdminHelp error: " + e);
  }
  String pattern = "Usage: java DFSAdmin";
  checkOutput(new String[]{"-cancel", "-renew"}, pattern, System.err,
      DFSAdmin.class);
}

Class: org.apache.hadoop.tools.mapred.TestCopyCommitter

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies CopyCommitter's delete-missing behaviour with interleaved flat file
 * sets: after commitJob, target files absent from the source are removed so the
 * folders are in sync (4 entries remain), and a second commitJob is idempotent.
 * Cleans up /tmp1 and resets the delete-missing label in the finally block.
 */
@Test public void testDeleteMissingFlatInterleavedFiles(){ TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config); JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID()); Configuration conf=jobContext.getConfiguration(); String sourceBase; String targetBase; FileSystem fs=null; try { OutputCommitter committer=new CopyCommitter(null,taskAttemptContext); fs=FileSystem.get(conf); sourceBase="/tmp1/" + String.valueOf(rand.nextLong()); targetBase="/tmp1/" + String.valueOf(rand.nextLong()); TestDistCpUtils.createFile(fs,sourceBase + "/1"); TestDistCpUtils.createFile(fs,sourceBase + "/3"); TestDistCpUtils.createFile(fs,sourceBase + "/4"); TestDistCpUtils.createFile(fs,sourceBase + "/5"); TestDistCpUtils.createFile(fs,sourceBase + "/7"); TestDistCpUtils.createFile(fs,sourceBase + "/8"); TestDistCpUtils.createFile(fs,sourceBase + "/9"); TestDistCpUtils.createFile(fs,targetBase + "/2"); TestDistCpUtils.createFile(fs,targetBase + "/4"); TestDistCpUtils.createFile(fs,targetBase + "/5"); TestDistCpUtils.createFile(fs,targetBase + "/7"); TestDistCpUtils.createFile(fs,targetBase + "/9"); TestDistCpUtils.createFile(fs,targetBase + "/A"); DistCpOptions options=new DistCpOptions(Arrays.asList(new Path(sourceBase)),new Path("/out")); options.setSyncFolder(true); options.setDeleteMissing(true); options.appendToConf(conf); CopyListing listing=new GlobbedCopyListing(conf,CREDENTIALS); Path listingFile=new Path("/tmp1/" + String.valueOf(rand.nextLong())); listing.buildListing(listingFile,options); conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,targetBase); conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,targetBase); committer.commitJob(jobContext); if (!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) { Assert.fail("Source and target folders are not in sync"); } Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4); committer.commitJob(jobContext); if 
(!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) { Assert.fail("Source and target folders are not in sync"); } Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4); } catch ( IOException e) { LOG.error("Exception encountered while testing for delete missing",e); Assert.fail("Delete missing failure"); } finally { TestDistCpUtils.delete(fs,"/tmp1"); conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING,"false"); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Verifies that an atomic commit fails when the final path already exists, leaving both the work and final directories untouched; cleans up both paths and resets the atomic-copy flag afterwards. */
@Test public void testAtomicCommitExistingFinal(){ TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config); JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID()); Configuration conf=jobContext.getConfiguration(); String workPath="/tmp1/" + String.valueOf(rand.nextLong()); String finalPath="/tmp1/" + String.valueOf(rand.nextLong()); FileSystem fs=null; try { OutputCommitter committer=new CopyCommitter(null,taskAttemptContext); fs=FileSystem.get(conf); fs.mkdirs(new Path(workPath)); fs.mkdirs(new Path(finalPath)); conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,workPath); conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,finalPath); conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,true); Assert.assertTrue(fs.exists(new Path(workPath))); Assert.assertTrue(fs.exists(new Path(finalPath))); try { committer.commitJob(jobContext); Assert.fail("Should not be able to atomic-commit to pre-existing path."); } catch ( Exception exception) { Assert.assertTrue(fs.exists(new Path(workPath))); Assert.assertTrue(fs.exists(new Path(finalPath))); LOG.info("Atomic-commit Test pass."); } } catch ( IOException e) { LOG.error("Exception encountered while testing for atomic commit.",e); Assert.fail("Atomic commit failure"); } finally { TestDistCpUtils.delete(fs,workPath); TestDistCpUtils.delete(fs,finalPath); conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,false); } }

TestInitializer UtilityVerifier HybridVerifier 
/** Test fixture setup: points the DistCp meta-folder label at /meta and creates that directory on the mini-cluster, failing the test if creation throws. */
@Before public void createMetaFolder(){ config.set(DistCpConstants.CONF_LABEL_META_FOLDER,"/meta"); Path meta=new Path("/meta"); try { cluster.getFileSystem().mkdirs(meta); } catch ( IOException e) { LOG.error("Exception encountered while creating meta folder",e); Assert.fail("Unable to create meta folder"); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies an atomic commit when the final path does not yet exist: after
 * commitJob the work directory is renamed to the final path, and a repeated
 * commitJob is idempotent (state unchanged). Cleans up both paths and resets
 * the atomic-copy flag in the finally block.
 */
@Test
public void testAtomicCommitMissingFinal() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();
  String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
  String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    fs.mkdirs(new Path(workPath));
    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
    Assert.assertTrue(fs.exists(new Path(workPath)));
    Assert.assertFalse(fs.exists(new Path(finalPath)));
    committer.commitJob(jobContext);
    // Commit moves the work dir to the final path.
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));
    // A second commit must leave the state unchanged.
    committer.commitJob(jobContext);
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));
  } catch (IOException e) {
    // Message fixed: it previously said "preserve status" (copy-paste from
    // another test) although this test exercises atomic commit.
    LOG.error("Exception encountered while testing for atomic commit.", e);
    Assert.fail("Atomic commit failure");
  } finally {
    TestDistCpUtils.delete(fs, workPath);
    TestDistCpUtils.delete(fs, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** Verifies that commitJob with no configured work/final paths succeeds, reports "Commit Successful", and remains idempotent on a second call. */
@Test public void testNoCommitAction(){ TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config); JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID()); try { OutputCommitter committer=new CopyCommitter(null,taskAttemptContext); committer.commitJob(jobContext); Assert.assertEquals(taskAttemptContext.getStatus(),"Commit Successful"); committer.commitJob(jobContext); Assert.assertEquals(taskAttemptContext.getStatus(),"Commit Successful"); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Commit failed"); } }

TestCleaner BranchVerifier UtilityVerifier HybridVerifier 
/** Test fixture teardown: the committer is expected to have removed /meta; if it still exists this deletes it (so later tests aren't polluted) and then deliberately fails the test to flag the leftover folder. */
@After public void cleanupMetaFolder(){ Path meta=new Path("/meta"); try { if (cluster.getFileSystem().exists(meta)) { cluster.getFileSystem().delete(meta,true); Assert.fail("Expected meta folder to be deleted"); } } catch ( IOException e) { LOG.error("Exception encountered while cleaning up folder",e); Assert.fail("Unable to clean up meta folder"); } }

Class: org.apache.hadoop.tools.mapred.TestCopyMapper

UtilityVerifier BooleanVerifier HybridVerifier 
/** Verifies that copying files whose block size differs from the target (with no attributes preserved) fails, and that the failure message suggests both -pb and -skipCrc. NOTE(review): the catch block dereferences getCause().getCause() without null checks — an unrelated exception here would surface as an NPE; confirm this is acceptable. */
@Test(timeout=40000) public void testCopyFailOnBlockSizeDifference(){ try { deleteState(); createSourceDataWithDifferentBlockSize(); FileSystem fs=cluster.getFileSystem(); CopyMapper copyMapper=new CopyMapper(); StubContext stubContext=new StubContext(getConfiguration(),null,0); Mapper.Context context=stubContext.getContext(); Configuration configuration=context.getConfiguration(); EnumSet fileAttributes=EnumSet.noneOf(DistCpOptions.FileAttribute.class); configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),DistCpUtils.packAttributes(fileAttributes)); copyMapper.setup(context); for ( Path path : pathList) { final FileStatus fileStatus=fs.getFileStatus(path); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),path)),new CopyListingFileStatus(fileStatus),context); } Assert.fail("Copy should have failed because of block-size difference."); } catch ( Exception exception) { Assert.assertTrue("Failure exception should have suggested the use of -pb.",exception.getCause().getCause().getMessage().contains("pb")); Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",exception.getCause().getCause().getMessage().contains("skipCrc")); } }

UtilityVerifier BooleanVerifier HybridVerifier 
/** Verifies that mapping a source directory onto an existing target file raises an IOException starting with "Can't replace". NOTE(review): if map() does not throw, the inner try completes without any assertion — there is no fail() guard; confirm whether the pass-through is intended. */
@Test(timeout=40000) public void testDirToFile(){ try { deleteState(); createSourceData(); FileSystem fs=cluster.getFileSystem(); CopyMapper copyMapper=new CopyMapper(); StubContext stubContext=new StubContext(getConfiguration(),null,0); Mapper.Context context=stubContext.getContext(); mkdirs(SOURCE_PATH + "/src/file"); touchFile(TARGET_PATH + "/src/file"); try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(fs.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context); } catch ( IOException e) { Assert.assertTrue(e.getMessage().startsWith("Can't replace")); } } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that when source and target files are identical and the "guest"
 * user lacks write permission, CopyMapper skips the copy: exactly one record
 * is written and it is marked SKIP with the source path. All FS interaction
 * for the guest runs inside UserGroupInformation.doAs blocks.
 */
@Test(timeout=40000) public void testSkipCopyNoPerms(){ try { deleteState(); createSourceData(); UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest"); final CopyMapper copyMapper=new CopyMapper(); final StubContext stubContext=tmpUser.doAs(new PrivilegedAction(){ @Override public StubContext run(){ try { return new StubContext(getConfiguration(),null,0); } catch ( Exception e) { LOG.error("Exception encountered ",e); throw new RuntimeException(e); } } } ); final Mapper.Context context=stubContext.getContext(); EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class); preserveStatus.remove(DistCpOptions.FileAttribute.ACL); preserveStatus.remove(DistCpOptions.FileAttribute.XATTR); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus)); touchFile(SOURCE_PATH + "/src/file"); touchFile(TARGET_PATH + "/src/file"); cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ)); cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ)); final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){ @Override public FileSystem run(){ try { return FileSystem.get(configuration); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); throw new RuntimeException("Test ought to fail here"); } } } ); tmpUser.doAs(new PrivilegedAction(){ @Override public Integer run(){ try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context); Assert.assertEquals(stubContext.getWriter().values().size(),1); Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP")); Assert.assertTrue(stubContext.getWriter().values().get(0).toString().contains(SOURCE_PATH + 
"/src/file")); } catch ( Exception e) { throw new RuntimeException(e); } return null; } } ); } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); } }

UtilityVerifier BooleanVerifier HybridVerifier 
/** Verifies that a "guest" user copying with preserve-status enabled onto a world-writable target fails with AccessControlException (cannot preserve attributes it doesn't own). NOTE(review): the AccessControlException branch asserts a constant true — it documents the expected path but verifies nothing beyond the catch itself. */
@Test(timeout=40000) public void testPreserve(){ try { deleteState(); createSourceData(); UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest"); final CopyMapper copyMapper=new CopyMapper(); final Mapper.Context context=tmpUser.doAs(new PrivilegedAction.Context>(){ @Override public Mapper.Context run(){ try { StubContext stubContext=new StubContext(getConfiguration(),null,0); return stubContext.getContext(); } catch ( Exception e) { LOG.error("Exception encountered ",e); throw new RuntimeException(e); } } } ); EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class); preserveStatus.remove(DistCpOptions.FileAttribute.ACL); preserveStatus.remove(DistCpOptions.FileAttribute.XATTR); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus)); touchFile(SOURCE_PATH + "/src/file"); mkdirs(TARGET_PATH); cluster.getFileSystem().setPermission(new Path(TARGET_PATH),new FsPermission((short)511)); final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){ @Override public FileSystem run(){ try { return FileSystem.get(configuration); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); throw new RuntimeException("Test ought to fail here"); } } } ); tmpUser.doAs(new PrivilegedAction(){ @Override public Integer run(){ try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context); Assert.fail("Expected copy to fail"); } catch ( AccessControlException e) { Assert.assertTrue("Got exception: " + e.getMessage(),true); } catch ( Exception e) { throw new RuntimeException(e); } return null; } } ); } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped: when the final
 * path is the parent directory the identical file is skipped (mtime
 * unchanged), but when the final path names the file itself it is
 * overwritten (mtime advances).
 */
@Test(timeout=40000)
public void testSingleFileCopy() {
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(
        sourceFilePath.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();
    // Case 1: final path is the parent dir -> identical file is skipped.
    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.getParent().toString());
    copyMapper.setup(context);
    final CopyListingFileStatus sourceFileStatus =
        new CopyListingFileStatus(fs.getFileStatus(sourceFilePath));
    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
        new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been skipped", before == after);
    // Case 2: final path is the file itself -> must be overwritten.
    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.toString());
    copyMapper.setup(context);
    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try {
      // Ensure a measurable modification-time delta.
      Thread.sleep(2);
    } catch (Throwable ignore) {
    }
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
        new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been overwritten.", before < after);
  } catch (Exception exception) {
    // Print the trace BEFORE failing: Assert.fail throws, so the original
    // printStackTrace() placed after it was unreachable dead code.
    exception.printStackTrace();
    Assert.fail("Unexpected exception: " + exception.getMessage());
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** Verifies that mapping a source file onto an existing target directory raises an IOException starting with "Can't replace". NOTE(review): as in testDirToFile, no fail() follows map() — if nothing throws, the inner block passes vacuously; confirm intent. */
@Test(timeout=40000) public void testFileToDir(){ try { deleteState(); createSourceData(); FileSystem fs=cluster.getFileSystem(); CopyMapper copyMapper=new CopyMapper(); StubContext stubContext=new StubContext(getConfiguration(),null,0); Mapper.Context context=stubContext.getContext(); touchFile(SOURCE_PATH + "/src/file"); mkdirs(TARGET_PATH + "/src/file"); try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(fs.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context); } catch ( IOException e) { Assert.assertTrue(e.getMessage().startsWith("Can't replace")); } } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); } }

Class: org.apache.hadoop.tools.mapred.TestCopyOutputFormat

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** Verifies CopyOutputFormat working-directory round-trip: null when unset or empty, and both the getter and the underlying config label reflect setWorkingDirectory. */
@Test public void testSetWorkingDirectory(){ try { Job job=Job.getInstance(new Configuration()); Assert.assertEquals(null,CopyOutputFormat.getWorkingDirectory(job)); job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,""); Assert.assertEquals(null,CopyOutputFormat.getWorkingDirectory(job)); Path directory=new Path("/tmp/test"); CopyOutputFormat.setWorkingDirectory(job,directory); Assert.assertEquals(directory,CopyOutputFormat.getWorkingDirectory(job)); Assert.assertEquals(directory.toString(),job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH)); } catch ( IOException e) { LOG.error("Exception encountered while running test",e); Assert.fail("Failed while testing for set Working Directory"); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** Verifies CopyOutputFormat commit-directory round-trip: null when unset or empty, and both the getter and the underlying config label reflect setCommitDirectory. */
@Test public void testSetCommitDirectory(){ try { Job job=Job.getInstance(new Configuration()); Assert.assertEquals(null,CopyOutputFormat.getCommitDirectory(job)); job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,""); Assert.assertEquals(null,CopyOutputFormat.getCommitDirectory(job)); Path directory=new Path("/tmp/test"); CopyOutputFormat.setCommitDirectory(job,directory); Assert.assertEquals(directory,CopyOutputFormat.getCommitDirectory(job)); Assert.assertEquals(directory.toString(),job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH)); } catch ( IOException e) { LOG.error("Exception encountered while running test",e); Assert.fail("Failed while testing for set Commit Directory"); } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** Verifies that CopyOutputFormat.getOutputCommitter returns a CopyCommitter when the mapred output dir is configured. */
@Test public void testGetOutputCommitter(){ try { TaskAttemptContext context=new TaskAttemptContextImpl(new Configuration(),new TaskAttemptID("200707121733",1,TaskType.MAP,1,1)); context.getConfiguration().set("mapred.output.dir","/out"); Assert.assertTrue(new CopyOutputFormat().getOutputCommitter(context) instanceof CopyCommitter); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Unable to get output committer"); } }

Class: org.apache.hadoop.tools.mapred.TestRetriableFileCopyCommand

NullVerifier EqualityVerifier HybridVerifier 
/**
 * The copy must surface the exact exception thrown when closing the target
 * stream fails, rather than swallowing it.
 */
@SuppressWarnings("rawtypes")
@Test
public void testFailOnCloseError() throws Exception {
  Mapper.Context context = mock(Mapper.Context.class);
  doReturn(new Configuration()).when(context).getConfiguration();
  Exception expectedEx = new IOException("boom");
  OutputStream out = mock(OutputStream.class);
  doThrow(expectedEx).when(out).close();

  File tempFile = File.createTempFile(this.getClass().getSimpleName(), null);
  tempFile.deleteOnExit();
  FileStatus stat =
      new FileStatus(1L, false, 1, 1024, 0, new Path(tempFile.toURI()));

  Exception actualEx = null;
  try {
    new RetriableFileCopyCommand("testFailOnCloseError", FileAction.OVERWRITE)
        .copyBytes(stat, 0, out, 512, context);
  } catch (Exception e) {
    actualEx = e;
  }
  assertNotNull("close didn't fail", actualEx);
  // Reference equality is intended: the very exception stubbed on close().
  assertEquals(expectedEx, actualEx);
}

Class: org.apache.hadoop.tools.mapred.lib.TestDynamicInputFormat

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Builds a copy listing, asks DynamicInputFormat for splits, and checks that
 * every record is an expected path and that progress is monotonic in [0, 1].
 */
@Test
public void testGetSplits() throws Exception {
  DistCpOptions options = getOptions();
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks", String.valueOf(options.getMaxMaps()));
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
      new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/testDynInputFormat/fileList.seq"), options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  DynamicInputFormat inputFormat = new DynamicInputFormat();
  List splits = inputFormat.getSplits(jobContext);

  int nFiles = 0;
  int taskId = 0;
  for (InputSplit split : splits) {
    RecordReader recordReader = inputFormat.createRecordReader(split, null);
    StubContext stubContext =
        new StubContext(jobContext.getConfiguration(), recordReader, taskId);
    final TaskAttemptContext taskAttemptContext = stubContext.getContext();
    recordReader.initialize(splits.get(0), taskAttemptContext);

    float previousProgressValue = 0f;
    while (recordReader.nextKeyValue()) {
      CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
      String source = fileStatus.getPath().toString();
      System.out.println(source);
      Assert.assertTrue(expectedFilePaths.contains(source));
      // Progress never decreases and stays within [0, 1].
      final float progress = recordReader.getProgress();
      Assert.assertTrue(progress >= previousProgressValue);
      Assert.assertTrue(progress >= 0.0f);
      Assert.assertTrue(progress <= 1.0f);
      previousProgressValue = progress;
      ++nFiles;
    }
    Assert.assertTrue(recordReader.getProgress() == 1.0f);
    ++taskId;
  }
  Assert.assertEquals(expectedFilePaths.size(), nFiles);
}

Class: org.apache.hadoop.tools.rumen.TestHistograms

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * There should be files in the directory named by
 * {@code ${test.build.data}/rumen/histogram-test}: pairs of inputXxx.json and
 * goldXxx.json. Each input file is read as a HistogramRawTestData in JSON; a
 * Histogram is built from its data field and a LoggedDiscreteCDF from its
 * percentiles and scale fields. The corresponding goldXxx.json is read as a
 * LoggedDiscreteCDF and the two are deep-compared.
 *
 * @throws IOException on any I/O failure while reading the test files
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir =
      new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
  FileStatus[] tests = lfs.listStatus(rootInputFile);
  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      // Each inputXxx has a matching goldXxx with the expected CDF.
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold" + testName);
      // Fixed typo in assertion message ("dies" -> "does").
      assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser parser =
          new JsonObjectMapperParser(goldStream, LoggedDiscreteCDF.class);
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, ""));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      } finally {
        parser.close();
      }
    }
  }
}

Class: org.apache.hadoop.tools.util.TestDistCpUtils

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies DistCpUtils.preserve applies only the requested file attributes:
 * nothing with an empty set, then permission only, then owner/group as well.
 * Fix: JUnit's assertEquals takes (expected, actual); the original had them
 * reversed, which produces misleading failure messages.
 */
@Test
public void testPreserve() {
  try {
    FileSystem fs = FileSystem.get(config);
    EnumSet attributes = EnumSet.noneOf(FileAttribute.class);
    Path path = new Path("/tmp/abc");
    Path src = new Path("/tmp/src");
    fs.mkdirs(path);
    fs.mkdirs(src);
    CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

    FsPermission noPerm = new FsPermission((short) 0);
    fs.setPermission(path, noPerm);
    fs.setOwner(path, "nobody", "nobody");

    // Empty attribute set: nothing should be preserved from the source.
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    FileStatus target = fs.getFileStatus(path);
    Assert.assertEquals(noPerm, target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());

    // PERMISSION only: permission copied, ownership untouched.
    attributes.add(FileAttribute.PERMISSION);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());

    // GROUP + USER as well: full ownership copied.
    attributes.add(FileAttribute.GROUP);
    attributes.add(FileAttribute.USER);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals(srcStatus.getOwner(), target.getOwner());
    Assert.assertEquals(srcStatus.getGroup(), target.getGroup());

    fs.delete(path, true);
    fs.delete(src, true);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Preserve test failure");
  }
}

Class: org.apache.hadoop.util.TestApplicationClassLoader

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A resource bundled only in the test jar must be visible through the
 * ApplicationClassLoader but not through the parent classloader.
 * Fix: the resource stream was never closed; use try-with-resources.
 */
@Test
public void testGetResource() throws IOException {
  URL testJar = makeTestJar().toURI().toURL();
  ClassLoader currentClassLoader = getClass().getClassLoader();
  ClassLoader appClassloader =
      new ApplicationClassLoader(new URL[] {testJar}, currentClassLoader, null);
  assertNull("Resource should be null for current classloader",
      currentClassLoader.getResourceAsStream("resource.txt"));
  try (InputStream in = appClassloader.getResourceAsStream("resource.txt")) {
    assertNotNull("Resource should not be null for app classloader", in);
    assertEquals("hello", IOUtils.toString(in));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * constructUrlsFromClasspath must keep plain files, directories, and jars
 * matched by a wildcard, while silently dropping nonexistent entries.
 */
@Test
public void testConstructUrlsFromClasspath() throws Exception {
  File plainFile = new File(testDir, "file");
  assertTrue("Create file", plainFile.createNewFile());
  File dir = new File(testDir, "dir");
  assertTrue("Make dir", dir.mkdir());
  File jarsDir = new File(testDir, "jarsdir");
  assertTrue("Make jarsDir", jarsDir.mkdir());
  File nonJarFile = new File(jarsDir, "nonjar");
  assertTrue("Create non-jar file", nonJarFile.createNewFile());
  File jarFile = new File(jarsDir, "a.jar");
  assertTrue("Create jar file", jarFile.createNewFile());
  File missing = new File(testDir, "nofile");

  // Mix of valid, wildcard, and nonexistent classpath entries.
  StringBuilder cp = new StringBuilder();
  cp.append(plainFile.getAbsolutePath()).append(File.pathSeparator)
      .append(dir.getAbsolutePath()).append(File.pathSeparator)
      .append(jarsDir.getAbsolutePath() + "/*").append(File.pathSeparator)
      .append(missing.getAbsolutePath()).append(File.pathSeparator)
      .append(missing.getAbsolutePath() + "/*").append(File.pathSeparator);

  URL[] urls = constructUrlsFromClasspath(cp.toString());
  assertEquals(3, urls.length);
  assertEquals(plainFile.toURI().toURL(), urls[0]);
  assertEquals(dir.toURI().toURL(), urls[1]);
  // The wildcard keeps only the .jar, not the non-jar file.
  assertEquals(jarFile.toURI().toURL(), urls[2]);
}

Class: org.apache.hadoop.util.TestAsyncDiskService

BranchVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a batch of ExampleTasks across two volumes, checks that an unknown
 * volume is rejected, and verifies a clean shutdown with all tasks counted.
 */
@Test
public void testAsyncDiskService() throws Throwable {
  String[] vols = new String[] {"/0", "/1"};
  AsyncDiskService service = new AsyncDiskService(vols);

  int total = 100;
  for (int i = 0; i < total; i++) {
    service.execute(vols[i % 2], new ExampleTask());
  }

  Exception caught = null;
  try {
    service.execute("no_such_volume", new ExampleTask());
  } catch (RuntimeException ex) {
    caught = ex;
  }
  assertNotNull("Executing a task on a non-existing volume should throw an "
      + "Exception.", caught);

  service.shutdown();
  if (!service.awaitTermination(5000)) {
    fail("AsyncDiskService didn't shutdown in 5 seconds.");
  }
  assertEquals(total, count);
}

Class: org.apache.hadoop.util.TestClassUtil

BooleanVerifier NullVerifier HybridVerifier 
/**
 * ClassUtil.findContainingJar must locate the log4j jar that provides the
 * Logger class, and that jar must exist on disk with a log4j-style name.
 * Fix: added the missing space in the "Incorrect jar file" failure message.
 */
@Test(timeout = 1000)
public void testFindContainingJar() {
  String containingJar = ClassUtil.findContainingJar(Logger.class);
  Assert.assertNotNull("Containing jar not found for Logger", containingJar);
  File jarFile = new File(containingJar);
  Assert.assertTrue("Containing jar does not exist on file system",
      jarFile.exists());
  Assert.assertTrue("Incorrect jar file " + containingJar,
      jarFile.getName().matches("log4j.+[.]jar"));
}

Class: org.apache.hadoop.util.TestClasspath

TestInitializer BooleanVerifier HybridVerifier 
/**
 * Recreates a clean test directory and redirects System.out/err into byte
 * buffers so tests can assert on Classpath's console output.
 */
@Before
public void setUp() {
  assertTrue(FileUtil.fullyDelete(TEST_DIR));
  assertTrue(TEST_DIR.mkdirs());
  // Remember the real streams so tearDown can restore them.
  oldStdout = System.out;
  oldStderr = System.err;
  stdout = new ByteArrayOutputStream();
  printStdout = new PrintStream(stdout);
  System.setOut(printStdout);
  stderr = new ByteArrayOutputStream();
  printStderr = new PrintStream(stderr);
  System.setErr(printStderr);
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/** An unrecognized option must exit with an error on stderr and no stdout. */
@Test
public void testUnrecognized() {
  try {
    Classpath.main(new String[] {"--notarealoption"});
    fail("expected exit");
  } catch (ExitUtil.ExitException e) {
    assertTrue(stdout.toByteArray().length == 0);
    String errText = new String(stderr.toByteArray(), UTF8);
    assertTrue(errText.contains("unrecognized option"));
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/** "--jar" without a path must exit with an error on stderr and no stdout. */
@Test
public void testJarFileMissing() throws IOException {
  try {
    Classpath.main(new String[] {"--jar"});
    fail("expected exit");
  } catch (ExitUtil.ExitException e) {
    assertTrue(stdout.toByteArray().length == 0);
    String errText = new String(stderr.toByteArray(), UTF8);
    assertTrue(errText.contains("requires path of jar"));
  }
}

TestCleaner BooleanVerifier HybridVerifier 
/** Restores the real System streams, closes the capture buffers, and cleans up. */
@After
public void tearDown() {
  System.setOut(oldStdout);
  System.setErr(oldStderr);
  IOUtils.cleanup(LOG, printStdout, printStderr);
  assertTrue(FileUtil.fullyDelete(TEST_DIR));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "--glob" must print exactly the JVM's classpath and write nothing to stderr. */
@Test
public void testGlob() {
  Classpath.main(new String[] {"--glob"});
  String outText = new String(stdout.toByteArray(), UTF8);
  assertEquals(System.getProperty("java.class.path"), outText.trim());
  assertTrue(stderr.toByteArray().length == 0);
}

Class: org.apache.hadoop.util.TestDataChecksum

BooleanVerifier EqualityVerifier HybridVerifier 
/** DataChecksum equality depends on both the checksum type and the chunk size. */
@Test
public void testEquality() {
  // Same type, same chunk size: equal.
  assertEquals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512),
      DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  // Same type, different chunk size: not equal.
  assertFalse(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512)
      .equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 1024)));
  // Different type, same chunk size: not equal.
  assertFalse(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512)
      .equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512)));
}

Class: org.apache.hadoop.util.TestDirectBufferPool

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** A buffer returned to the pool must come back cleared (position/limit reset). */
@Test
public void testBuffersAreReset() {
  ByteBuffer first = pool.getBuffer(100);
  first.putInt(0xdeadbeef);
  assertEquals(96, first.remaining());
  pool.returnBuffer(first);
  // The pool hands the same instance back, with its state reset.
  ByteBuffer second = pool.getBuffer(100);
  assertSame(first, second);
  assertEquals(100, first.remaining());
  pool.returnBuffer(second);
}

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** Basic pool behavior: sized buffers, reuse of returned buffers, fresh otherwise. */
@Test
public void testBasics() {
  ByteBuffer first = pool.getBuffer(100);
  assertEquals(100, first.capacity());
  assertEquals(100, first.remaining());
  pool.returnBuffer(first);
  // A returned buffer is recycled...
  ByteBuffer second = pool.getBuffer(100);
  assertSame(first, second);
  // ...but while it is checked out, a new request gets a distinct buffer.
  ByteBuffer third = pool.getBuffer(100);
  assertNotSame(second, third);
  pool.returnBuffer(second);
  pool.returnBuffer(third);
}

Class: org.apache.hadoop.util.TestHostsFileReader

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Files containing only comment lines must yield empty include/exclude sets. */
@Test
public void testHostFileReaderWithCommentsOnly() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.write("#DFS-Hosts-excluded\n");
  excludesWriter.close();
  includesWriter.write("#Hosts-in-DFS\n");
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  assertEquals(0, reader.getHosts().size());
  assertEquals(0, reader.getExcludedHosts().size());
  assertFalse(reader.getHosts().contains("somehost5"));
  assertFalse(reader.getExcludedHosts().contains("somehost5"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Hosts separated by tabs, newlines, and comments must be parsed correctly. */
@Test
public void testHostFileReaderWithTabs() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.write("#DFS-Hosts-excluded\n");
  excludesWriter.write("   \n");
  excludesWriter.write("   somehost \t somehost2 \n somehost4");
  excludesWriter.write("   somehost3 \t # somehost5");
  excludesWriter.close();
  includesWriter.write("#Hosts-in-DFS\n");
  includesWriter.write("     \n");
  includesWriter.write("   somehost \t  somehost2 \n somehost4");
  includesWriter.write("   somehost3 \t # somehost5");
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  // Four real hosts each; the "# somehost5" tail is a comment and dropped.
  assertEquals(4, reader.getHosts().size());
  assertEquals(4, reader.getExcludedHosts().size());
  assertTrue(reader.getHosts().contains("somehost2"));
  assertFalse(reader.getHosts().contains("somehost5"));
  assertTrue(reader.getExcludedHosts().contains("somehost2"));
  assertFalse(reader.getExcludedHosts().contains("somehost5"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Space-separated hosts parse; everything after '#' on a line is a comment. */
@Test
public void testHostFileReaderWithSpaces() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.write("#DFS-Hosts-excluded\n");
  excludesWriter.write("   somehost somehost2");
  excludesWriter.write("   somehost3 # somehost4");
  excludesWriter.close();
  includesWriter.write("#Hosts-in-DFS\n");
  includesWriter.write("   somehost somehost2");
  includesWriter.write("   somehost3 # somehost4");
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  assertEquals(3, reader.getHosts().size());
  assertEquals(3, reader.getExcludedHosts().size());
  assertTrue(reader.getHosts().contains("somehost3"));
  assertFalse(reader.getHosts().contains("somehost5"));
  // somehost4 appears only after '#', so it must not be parsed as a host.
  assertFalse(reader.getHosts().contains("somehost4"));
  assertTrue(reader.getExcludedHosts().contains("somehost3"));
  assertFalse(reader.getExcludedHosts().contains("somehost5"));
  assertFalse(reader.getExcludedHosts().contains("somehost4"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Completely empty include/exclude files must yield empty host sets. */
@Test
public void testHostFileReaderWithNull() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.close();
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  assertEquals(0, reader.getHosts().size());
  assertEquals(0, reader.getExcludedHosts().size());
  assertFalse(reader.getHosts().contains("somehost5"));
  assertFalse(reader.getExcludedHosts().contains("somehost5"));
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** refresh() must throw FileNotFoundException once the includes file is deleted. */
@Test
public void testRefreshHostFileReaderWithNonexistentFile() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.close();
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  assertTrue(INCLUDES_FILE.delete());
  try {
    reader.refresh();
    Assert.fail("Should throw FileNotFoundException");
  } catch (FileNotFoundException ex) {
    // expected: the includes file no longer exists
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** End-to-end parse of realistic include/exclude files with comments and pairs. */
@Test
public void testHostsFileReader() throws Exception {
  FileWriter excludesWriter = new FileWriter(excludesFile);
  FileWriter includesWriter = new FileWriter(includesFile);
  excludesWriter.write("#DFS-Hosts-excluded\n");
  excludesWriter.write("somehost1\n");
  excludesWriter.write("#This-is-comment\n");
  excludesWriter.write("somehost2\n");
  excludesWriter.write("somehost3 # host3\n");
  excludesWriter.write("somehost4\n");
  excludesWriter.write("somehost4 somehost5\n");
  excludesWriter.close();
  includesWriter.write("#Hosts-in-DFS\n");
  includesWriter.write("somehost1\n");
  includesWriter.write("somehost2\n");
  includesWriter.write("somehost3\n");
  includesWriter.write("#This-is-comment\n");
  includesWriter.write("somehost4 # host4\n");
  includesWriter.write("somehost4 somehost5\n");
  includesWriter.close();

  HostsFileReader reader = new HostsFileReader(includesFile, excludesFile);
  // somehost4 is listed twice but sets deduplicate it; 5 distinct hosts each.
  assertEquals(5, reader.getHosts().size());
  assertEquals(5, reader.getExcludedHosts().size());
  assertTrue(reader.getHosts().contains("somehost5"));
  assertFalse(reader.getHosts().contains("host3"));
  assertTrue(reader.getExcludedHosts().contains("somehost5"));
  assertFalse(reader.getExcludedHosts().contains("host4"));
}

Class: org.apache.hadoop.util.TestIdentityHashStore

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Inserts many keys, visits them, removes them all, and checks the store ends
 * up empty with the expected grown capacity.
 */
@Test(timeout = 60000)
public void testAdditionsAndRemovals() {
  IdentityHashStore store = new IdentityHashStore(0);
  final int NUM_KEYS = 1000;
  LOG.debug("generating " + NUM_KEYS + " keys");
  final List keys = new ArrayList(NUM_KEYS);
  for (int i = 0; i < NUM_KEYS; i++) {
    keys.add(new Key("key " + i));
  }
  for (int i = 0; i < NUM_KEYS; i++) {
    store.put(keys.get(i), i);
  }
  // Every visited key must be one we inserted.
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      Assert.assertTrue(keys.contains(k));
    }
  });
  for (int i = 0; i < NUM_KEYS; i++) {
    Assert.assertEquals(Integer.valueOf(i), store.remove(keys.get(i)));
  }
  // After removing everything, the visitor must never be invoked.
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      Assert.fail("expected all entries to be removed");
    }
  });
  Assert.assertTrue("expected the store to be " + "empty, but found "
      + store.numElements() + " elements.", store.isEmpty());
  Assert.assertEquals(1024, store.capacity());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A zero-capacity store must still grow to hold and return a single entry. */
@Test(timeout = 60000)
public void testStartingWithZeroCapacity() {
  IdentityHashStore store = new IdentityHashStore(0);
  // An empty store must never invoke the visitor.
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  final Key key1 = new Key("key1");
  Integer value1 = new Integer(100);
  store.put(key1, value1);
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      Assert.assertEquals(key1, k);
    }
  });
  Assert.assertEquals(value1, store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The store keys on object identity: equal-but-distinct keys miss, and
 * repeated puts of the same key accumulate multiple entries.
 */
@Test(timeout = 60000)
public void testDuplicateInserts() {
  IdentityHashStore store = new IdentityHashStore(4);
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  Key key1 = new Key("key1");
  Integer value1 = new Integer(100);
  Integer value2 = new Integer(200);
  Integer value3 = new Integer(300);
  store.put(key1, value1);
  // A distinct object that equals() key1 must NOT match: identity semantics.
  Key equalToKey1 = new Key("key1");
  Assert.assertNull(store.get(equalToKey1));
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  store.put(key1, value2);
  store.put(key1, value3);
  final List allValues = new LinkedList();
  store.visitAll(new Visitor() {
    @Override
    public void accept(Key k, Integer v) {
      allValues.add(v);
    }
  });
  // Three puts of the same key yield three separate entries.
  Assert.assertEquals(3, allValues.size());
  for (int i = 0; i < 3; i++) {
    Integer value = store.remove(key1);
    Assert.assertTrue(allValues.remove(value));
  }
  Assert.assertNull(store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}

Class: org.apache.hadoop.util.TestMachineList

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** getCollection must return every trimmed entry of the original list. */
@Test
public void testGetCollection() {
  MachineList machineList = new MachineList(HOSTNAME_IP_CIDR_LIST);
  Collection entries = machineList.getCollection();
  assertEquals(7, machineList.getCollection().size());
  for (String item : StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
    assertTrue(entries.contains(item));
  }
}

Class: org.apache.hadoop.util.TestNativeCodeLoader

BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * When native testing is required, libhadoop.so must be loaded and every
 * optionally-built native codec must report a library name.
 * Fix: replaced the `== false` comparison with idiomatic negation.
 */
@Test
public void testNativeCodeLoaded() {
  if (!requireTestJni()) {
    LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
    return;
  }
  if (!NativeCodeLoader.isNativeCodeLoaded()) {
    fail("TestNativeCodeLoader: libhadoop.so testing was required, but "
        + "libhadoop.so was not loaded.");
  }
  assertFalse(NativeCodeLoader.getLibraryName().isEmpty());
  // library names are valid
  assertFalse(ZlibFactory.getLibraryName().isEmpty());
  if (NativeCodeLoader.buildSupportsSnappy()) {
    assertFalse(SnappyCodec.getLibraryName().isEmpty());
  }
  if (NativeCodeLoader.buildSupportsOpenssl()) {
    assertFalse(OpensslCipher.getLibraryName().isEmpty());
  }
  assertFalse(Lz4Codec.getLibraryName().isEmpty());
  LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
}

Class: org.apache.hadoop.util.TestReflectionUtils

UtilityVerifier EqualityVerifier HybridVerifier 
/** newInstance on a class without a default constructor must wrap the cause. */
@Test
public void testCantCreate() {
  try {
    ReflectionUtils.newInstance(NoDefaultCtor.class, null);
    fail("invalid call should fail");
  } catch (RuntimeException rte) {
    // The underlying reflective failure is preserved as the cause.
    assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
  }
}

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Loading the same class via many throwaway classloaders must not pin all of
 * them in ReflectionUtils' constructor cache.
 */
@SuppressWarnings("unchecked")
@Test
public void testCacheDoesntLeak() throws Exception {
  int iterations = 9999;
  for (int i = 0; i < iterations; i++) {
    URLClassLoader loader =
        new URLClassLoader(new URL[0], getClass().getClassLoader());
    Class cl = Class.forName(
        "org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
    Object o = ReflectionUtils.newInstance(cl, null);
    assertEquals(cl, o.getClass());
  }
  // After GC, the cache must hold fewer entries than loaders created.
  System.gc();
  assertTrue(cacheSize() + " too big", cacheSize() < iterations);
}

Class: org.apache.hadoop.util.TestShutdownHookManager

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises add/remove/query of shutdown hooks and verifies that hooks run in
 * descending priority order.
 */
@Test
public void shutdownHookManager() {
  ShutdownHookManager mgr = ShutdownHookManager.get();
  Assert.assertNotNull(mgr);
  Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size());
  Runnable hook1 = new Runnable() {
    @Override
    public void run() {
    }
  };
  Runnable hook2 = new Runnable() {
    @Override
    public void run() {
    }
  };

  mgr.addShutdownHook(hook1, 0);
  Assert.assertTrue(mgr.hasShutdownHook(hook1));
  Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
  Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0));
  mgr.removeShutdownHook(hook1);
  Assert.assertFalse(mgr.hasShutdownHook(hook1));

  mgr.addShutdownHook(hook1, 0);
  Assert.assertTrue(mgr.hasShutdownHook(hook1));
  Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
  Assert.assertTrue(mgr.hasShutdownHook(hook1));
  Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());

  // Higher priority runs first: hook2 (priority 1) precedes hook1 (priority 0).
  mgr.addShutdownHook(hook2, 1);
  Assert.assertTrue(mgr.hasShutdownHook(hook1));
  Assert.assertTrue(mgr.hasShutdownHook(hook2));
  Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size());
  Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0));
  Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
}

Class: org.apache.hadoop.util.TestShutdownThreadsHelper

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** shutdownThread's return value must agree with whether the thread stopped. */
@Test(timeout = 3000)
public void testShutdownThread() {
  Thread worker = new Thread(sampleRunnable);
  worker.start();
  boolean reportedShutdown = ShutdownThreadsHelper.shutdownThread(worker);
  boolean isTerminated = !worker.isAlive();
  assertEquals("Incorrect return value", reportedShutdown, isTerminated);
  assertTrue("Thread is not shutdown", isTerminated);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** shutdownExecutorService's return value must agree with executor termination. */
@Test
public void testShutdownThreadPool() throws InterruptedException {
  ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
  executor.execute(sampleRunnable);
  boolean reportedShutdown =
      ShutdownThreadsHelper.shutdownExecutorService(executor);
  boolean isTerminated = executor.isTerminated();
  assertEquals("Incorrect return value", reportedShutdown, isTerminated);
  assertTrue("ExecutorService is not shutdown", isTerminated);
}

Class: org.apache.hadoop.util.TestSignalLogger

UtilityVerifier AssumptionSetter HybridVerifier 
/** SignalLogger registers once; a second register must throw (UNIX only). */
@Test(timeout = 60000)
public void testInstall() throws Exception {
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  SignalLogger.INSTANCE.register(LOG);
  try {
    SignalLogger.INSTANCE.register(LOG);
    Assert.fail("expected IllegalStateException from double registration");
  } catch (IllegalStateException e) {
    // expected: double registration is rejected
  }
}

Class: org.apache.hadoop.util.TestStringUtils

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Splitting must drop empties, trim whitespace, and deduplicate entries. */
@Test
public void testGetUniqueNonEmptyTrimmedStrings() {
  final String TO_SPLIT = ",foo, bar,baz,,blah,blah,bar,";
  Collection parts = StringUtils.getTrimmedStringCollection(TO_SPLIT);
  assertEquals(4, parts.size());
  assertTrue(parts.containsAll(
      Arrays.asList(new String[] {"foo", "bar", "baz", "blah"})));
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * unEscapeString passes plain strings through, rejects unescaped special
 * characters, and unescapes properly-escaped ones.
 */
@Test(timeout = 30000)
public void testUnescapeString() throws Exception {
  assertEquals(NULL_STR, StringUtils.unEscapeString(NULL_STR));
  assertEquals(EMPTY_STR, StringUtils.unEscapeString(EMPTY_STR));
  assertEquals(STR_WO_SPECIAL_CHARS,
      StringUtils.unEscapeString(STR_WO_SPECIAL_CHARS));
  try {
    StringUtils.unEscapeString(STR_WITH_COMMA);
    fail("Should throw IllegalArgumentException");
  } catch (IllegalArgumentException e) {
    // expected: bare comma is illegal
  }
  assertEquals(STR_WITH_COMMA, StringUtils.unEscapeString(ESCAPED_STR_WITH_COMMA));
  try {
    StringUtils.unEscapeString(STR_WITH_ESCAPE);
    fail("Should throw IllegalArgumentException");
  } catch (IllegalArgumentException e) {
    // expected: dangling escape is illegal
  }
  assertEquals(STR_WITH_ESCAPE,
      StringUtils.unEscapeString(ESCAPED_STR_WITH_ESCAPE));
  try {
    StringUtils.unEscapeString(STR_WITH_BOTH2);
    fail("Should throw IllegalArgumentException");
  } catch (IllegalArgumentException e) {
    // expected: mixed unescaped specials are illegal
  }
  assertEquals(STR_WITH_BOTH2, StringUtils.unEscapeString(ESCAPED_STR_WITH_BOTH2));
}

UtilityVerifier EqualityVerifier HybridVerifier 
/** stringToURI must reject a malformed URI string with a clear message. */
@Test(timeout = 30000)
public void testStringToURI() {
  String[] badUris = new String[] {"file://"};
  try {
    StringUtils.stringToURI(badUris);
    fail("Ignoring URISyntaxException while creating URI from string file://");
  } catch (IllegalArgumentException iae) {
    assertEquals("Failed to create uri for file://", iae.getMessage());
  }
}

IterativeVerifier UtilityVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test(timeout=30000) public void testTraditionalBinaryPrefix() throws Exception { String[] symbol={"k","m","g","t","p","e"}; long m=1024; for ( String s : symbol) { assertEquals(0,string2long(0 + s)); assertEquals(m,string2long(1 + s)); m*=1024; } assertEquals(0L,string2long("0")); assertEquals(1024L,string2long("1k")); assertEquals(-1024L,string2long("-1k")); assertEquals(1259520L,string2long("1230K")); assertEquals(-1259520L,string2long("-1230K")); assertEquals(104857600L,string2long("100m")); assertEquals(-104857600L,string2long("-100M")); assertEquals(956703965184L,string2long("891g")); assertEquals(-956703965184L,string2long("-891G")); assertEquals(501377302265856L,string2long("456t")); assertEquals(-501377302265856L,string2long("-456T")); assertEquals(11258999068426240L,string2long("10p")); assertEquals(-11258999068426240L,string2long("-10P")); assertEquals(1152921504606846976L,string2long("1e")); assertEquals(-1152921504606846976L,string2long("-1E")); String tooLargeNumStr="10e"; try { string2long(tooLargeNumStr); fail("Test passed for a number " + tooLargeNumStr + " too large"); } catch ( IllegalArgumentException e) { assertEquals(tooLargeNumStr + " does not fit in a Long",e.getMessage()); } String tooSmallNumStr="-10e"; try { string2long(tooSmallNumStr); fail("Test passed for a number " + tooSmallNumStr + " too small"); } catch ( IllegalArgumentException e) { assertEquals(tooSmallNumStr + " does not fit in a Long",e.getMessage()); } String invalidFormatNumStr="10kb"; char invalidPrefix='b'; try { string2long(invalidFormatNumStr); fail("Test passed for a number " + invalidFormatNumStr + " has invalid format"); } catch ( IllegalArgumentException e) { assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"+ invalidFormatNumStr+ "'. 
Allowed prefixes are k, m, g, t, p, e(case insensitive)",e.getMessage()); } assertEquals("0",long2String(0,null,2)); for (int decimalPlace=0; decimalPlace < 2; decimalPlace++) { for (int n=1; n < TraditionalBinaryPrefix.KILO.value; n++) { assertEquals(n + "",long2String(n,null,decimalPlace)); assertEquals(-n + "",long2String(-n,null,decimalPlace)); } assertEquals("1 K",long2String(1L << 10,null,decimalPlace)); assertEquals("-1 K",long2String(-1L << 10,null,decimalPlace)); } assertEquals("8.00 E",long2String(Long.MAX_VALUE,null,2)); assertEquals("8.00 E",long2String(Long.MAX_VALUE - 1,null,2)); assertEquals("-8 E",long2String(Long.MIN_VALUE,null,2)); assertEquals("-8.00 E",long2String(Long.MIN_VALUE + 1,null,2)); final String[] zeros={" ",".0 ",".00 "}; for (int decimalPlace=0; decimalPlace < zeros.length; decimalPlace++) { final String trailingZeros=zeros[decimalPlace]; for (int e=11; e < Long.SIZE - 1; e++) { final TraditionalBinaryPrefix p=TraditionalBinaryPrefix.values()[e / 10 - 1]; { final long n=1L << e; final String expected=(n / p.value) + " " + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,2)); } { final long n=(1L << e) + 1; final String expected=(n / p.value) + trailingZeros + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,decimalPlace)); } { final long n=(1L << e) - 1; final String expected=((n + 1) / p.value) + trailingZeros + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,decimalPlace)); } } } assertEquals("1.50 K",long2String(3L << 9,null,2)); assertEquals("1.5 K",long2String(3L << 9,null,1)); assertEquals("1.50 M",long2String(3L << 19,null,2)); assertEquals("2 M",long2String(3L << 19,null,0)); assertEquals("3 G",long2String(3L << 30,null,2)); assertEquals("0 B",StringUtils.byteDesc(0)); assertEquals("-100 B",StringUtils.byteDesc(-100)); assertEquals("1 KB",StringUtils.byteDesc(1024)); assertEquals("1.50 KB",StringUtils.byteDesc(3L << 9)); assertEquals("1.50 MB",StringUtils.byteDesc(3L << 19)); assertEquals("3 
GB",StringUtils.byteDesc(3L << 30)); assertEquals("10%",StringUtils.formatPercent(0.1,0)); assertEquals("10.0%",StringUtils.formatPercent(0.1,1)); assertEquals("10.00%",StringUtils.formatPercent(0.1,2)); assertEquals("1%",StringUtils.formatPercent(0.00543,0)); assertEquals("0.5%",StringUtils.formatPercent(0.00543,1)); assertEquals("0.54%",StringUtils.formatPercent(0.00543,2)); assertEquals("0.543%",StringUtils.formatPercent(0.00543,3)); assertEquals("0.5430%",StringUtils.formatPercent(0.00543,4)); }

Class: org.apache.hadoop.util.TestWinUtils

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** winutils "ls" must report the expected mode, size, and path for a file. */
@Test(timeout = 30000)
public void testLs() throws IOException {
  final String content = "6bytes";
  final int contentSize = content.length();
  File testFile = new File(TEST_DIR, "file1");
  writeFile(testFile, content);

  // Plain listing: mode string first, canonical path last.
  String output = Shell.execCommand(Shell.WINUTILS, "ls",
      testFile.getCanonicalPath());
  String[] outputArgs = output.split("[ \r\n]");
  assertTrue(outputArgs[0].equals("-rwx------"));
  assertTrue(outputArgs[outputArgs.length - 1]
      .equals(testFile.getCanonicalPath()));

  // "-F" produces a fixed 9-field, '|'-separated record including the size.
  output = Shell.execCommand(Shell.WINUTILS, "ls", "-F",
      testFile.getCanonicalPath());
  outputArgs = output.split("[|\r\n]");
  assertEquals(9, outputArgs.length);
  assertTrue(outputArgs[0].equals("-rwx------"));
  assertEquals(contentSize, Long.parseLong(outputArgs[4]));
  assertTrue(outputArgs[8].equals(testFile.getCanonicalPath()));

  testFile.delete();
  assertFalse(testFile.exists());
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * winutils "symlink" must reject a link path containing forward slashes.
 */
@Test(timeout=30000)
public void testSymlinkRejectsForwardSlashesInLink() throws IOException {
  File newFile = new File(TEST_DIR, "file");
  assertTrue(newFile.createNewFile());
  final String target = newFile.getPath();
  // Build a link path whose separators are all '/' instead of '\'.
  final String link = new File(TEST_DIR, "link").getPath().replaceAll("\\\\", "/");
  try {
    Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in link: link = %s, target = %s", link, target));
  } catch (IOException e) {
    // Rejection is the expected outcome.
    LOG.info("Expected: Failed to create symlink with forward slashes in target");
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Validate behavior of chmod commands on directories on Windows:
 * listing without read permission, creating/renaming without write
 * permission, and the platform-specific delete behavior.
 */
@Test(timeout=30000)
public void testBasicChmodOnDir() throws IOException {
  File a = new File(TEST_DIR, "a");
  File b = new File(a, "b");
  a.mkdirs();
  assertTrue(b.createNewFile());

  // Remove read permission: listing should fail (File.list returns null).
  chmod("300", a);
  String[] files = a.list();
  // assertNull shows the unexpected value on failure, unlike assertTrue(null == files).
  assertNull("Listing a directory without read permission should fail", files);
  chmod("700", a);
  files = a.list();
  assertEquals("b", files[0]);

  // Remove write permission: creating a child file should fail.
  chmod("500", a);
  File c = new File(a, "c");
  try {
    c.createNewFile();
    // fail() states the intent directly; the original used the obfuscated
    // equivalent assertFalse("...", true).
    fail("writeFile should have failed!");
  } catch (IOException ex) {
    // Fixed message: the directory permissions at this point are 500, not 577.
    LOG.info("Expected: Failed to create a file when directory " + "permissions are 500");
  }
  assertTrue("Special behavior: deleting a file will succeed on Windows " + "even if a user does not have write permissions on the parent dir", b.delete());
  assertFalse("Renaming a file should fail on the dir where a user does " + "not have write permissions", b.renameTo(new File(a, "d")));

  chmod("700", a);
  assertTrue(c.createNewFile());
  File d = new File(a, "d");
  assertTrue(c.renameTo(d));

  // 600: no execute/search bit, but listing (read) still works.
  chmod("600", a);
  files = a.list();
  assertEquals("d", files[0]);
  assertTrue(d.delete());
  File e = new File(a, "e");
  assertTrue(e.createNewFile());
  assertTrue(e.renameTo(new File(a, "f")));
  chmod("700", a);
}

APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * winutils "symlink" must reject a target path containing forward slashes.
 */
@Test(timeout=30000)
public void testSymlinkRejectsForwardSlashesInTarget() throws IOException {
  File newFile = new File(TEST_DIR, "file");
  assertTrue(newFile.createNewFile());
  // Convert the target's separators to '/' so winutils must reject it.
  final String target = newFile.getPath().replaceAll("\\\\", "/");
  final String link = new File(TEST_DIR, "link").getPath();
  try {
    Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in target: link = %s, target = %s", link, target));
  } catch (IOException e) {
    // Rejection is the expected outcome.
    LOG.info("Expected: Failed to create symlink with forward slashes in target");
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier ConditionMatcher HybridVerifier 
/**
 * Verifies "winutils readlink": it must print the target of a directory or
 * file symlink, and exit with code 1 for non-links, missing links, and
 * bad usage.
 */
@Test(timeout=30000)
public void testReadLink() throws IOException {
  File dir1 = new File(TEST_DIR, "dir1");
  assertTrue(dir1.mkdirs());
  File file1 = new File(dir1, "file1.txt");
  assertTrue(file1.createNewFile());
  File dirLink = new File(TEST_DIR, "dlink");
  File fileLink = new File(TEST_DIR, "flink");
  Shell.execCommand(Shell.WINUTILS, "symlink", dirLink.toString(), dir1.toString());
  Shell.execCommand(Shell.WINUTILS, "symlink", fileLink.toString(), file1.toString());

  // Happy path: readlink echoes the symlink target.
  String readLinkOutput = Shell.execCommand(Shell.WINUTILS, "readlink", dirLink.toString());
  assertThat(readLinkOutput, equalTo(dir1.toString()));
  readLinkOutput = Shell.execCommand(Shell.WINUTILS, "readlink", fileLink.toString());
  assertThat(readLinkOutput, equalTo(file1.toString()));

  // Error paths: the five identical try/catch blocks of the original are
  // factored into one helper. All must exit with code 1.
  assertReadLinkFailsWithExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink", "");
  assertReadLinkFailsWithExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink", "ThereIsNoSuchLink");
  // A plain directory or file is not a symlink.
  assertReadLinkFailsWithExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink", dir1.toString());
  assertReadLinkFailsWithExitCode1("Failed to get Shell.ExitCodeException when reading bad symlink", file1.toString());
  // Extra arguments are a usage error.
  assertReadLinkFailsWithExitCode1("Failed to get Shell.ExitCodeException with bad parameters", "a", "b");
}

/** Runs "winutils readlink readLinkArgs..." and asserts it fails with exit code 1. */
private void assertReadLinkFailsWithExitCode1(String failMessage, String... readLinkArgs) throws IOException {
  String[] cmd = new String[readLinkArgs.length + 2];
  cmd[0] = Shell.WINUTILS;
  cmd[1] = "readlink";
  System.arraycopy(readLinkArgs, 0, cmd, 2, readLinkArgs.length);
  try {
    Shell.execCommand(cmd);
    fail(failMessage);
  } catch (Shell.ExitCodeException ece) {
    assertThat(ece.getExitCode(), is(1));
  }
}

TestInitializer AssumptionSetter HybridVerifier 
/** Skips the suite on non-Windows platforms and creates the scratch directory. */
@Before
public void setUp() {
  // These tests drive winutils.exe, so they only make sense on Windows.
  assumeTrue(Shell.WINDOWS);
  TEST_DIR.mkdirs();
}

Class: org.apache.hadoop.util.TestZKUtil

UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * resolveConfIndirection: plain values pass through unchanged, "@path"
 * values are replaced by the file's contents, and a dangling "@path"
 * raises FileNotFoundException naming the missing file.
 */
@Test
public void testConfIndirection() throws IOException {
  // null and non-@ values are returned as-is.
  assertNull(ZKUtil.resolveConfIndirection(null));
  assertEquals("x", ZKUtil.resolveConfIndirection("x"));

  // "@file" is resolved to the file's contents.
  TEST_FILE.getParentFile().mkdirs();
  Files.write("hello world", TEST_FILE, Charsets.UTF_8);
  assertEquals("hello world", ZKUtil.resolveConfIndirection("@" + TEST_FILE.getAbsolutePath()));

  // A reference to a missing file must fail with the path in the message.
  try {
    ZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
    fail("Did not throw for non-existent file reference");
  } catch (FileNotFoundException fnfe) {
    assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
  }
}

Class: org.apache.hadoop.util.hash.TestHash

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test public void testHash(){ int iterations=30; assertTrue("testHash jenkins error !!!",Hash.JENKINS_HASH == Hash.parseHashType("jenkins")); assertTrue("testHash murmur error !!!",Hash.MURMUR_HASH == Hash.parseHashType("murmur")); assertTrue("testHash undefined",Hash.INVALID_HASH == Hash.parseHashType("undefined")); Configuration cfg=new Configuration(); cfg.set("hadoop.util.hash.type","murmur"); assertTrue("testHash",MurmurHash.getInstance() == Hash.getInstance(cfg)); cfg=new Configuration(); cfg.set("hadoop.util.hash.type","jenkins"); assertTrue("testHash jenkins configuration error !!!",JenkinsHash.getInstance() == Hash.getInstance(cfg)); cfg=new Configuration(); assertTrue("testHash undefine configuration error !!!",MurmurHash.getInstance() == Hash.getInstance(cfg)); assertTrue("testHash error jenkin getInstance !!!",JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH)); assertTrue("testHash error murmur getInstance !!!",MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH)); assertNull("testHash error invalid getInstance !!!",Hash.getInstance(Hash.INVALID_HASH)); int murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes()); for (int i=0; i < iterations; i++) { assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes())); } murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67); for (int i=0; i < iterations; i++) { assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67)); } int jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes()); for (int i=0; i < iterations; i++) { assertTrue("multiple evaluation jenkins hash error !!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes())); } jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67); for (int i=0; i < iterations; i++) { assertTrue("multiple evaluation jenkins hash error 
!!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67)); } }

Class: org.apache.hadoop.yarn.TestRPC

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Calling an RPC method that the server-side protocol does not implement
 * must surface a YarnException identifying the unknown method.
 */
@Test
public void testUnknownCall() {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class.getName());
  YarnRPC rpc = YarnRPC.create(conf);
  String bindAddr = "localhost:0";
  InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
  // Serve ContainerManagementProtocol, then call it with the wrong protocol.
  Server server = rpc.getServer(ContainerManagementProtocol.class, new DummyContainerManager(), addr, conf, null, 1);
  server.start();
  try {
    ApplicationClientProtocol proxy = (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class, NetUtils.getConnectAddress(server), conf);
    try {
      proxy.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
      Assert.fail("Excepted RPC call to fail with unknown method.");
    } catch (YarnException e) {
      Assert.assertTrue(e.getMessage().matches("Unknown method getNewApplication called on.*" + "org.apache.hadoop.yarn.proto.ApplicationClientProtocol" + "\\$ApplicationClientProtocolService\\$BlockingInterface protocol."));
    } catch (Exception e) {
      // The original swallowed unexpected exceptions (printStackTrace only),
      // letting the test pass vacuously; fail loudly instead.
      throw new AssertionError("Unexpected exception type", e);
    }
  } finally {
    // Always release the server, even if an assertion fails.
    server.stop();
  }
}

Class: org.apache.hadoop.yarn.TestRecordFactory

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The PB record factory must return the protobuf implementation classes
 * for AllocateResponse and AllocateRequest.
 */
@Test
public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    AllocateResponse response = pbRecordFactory.newRecordInstance(AllocateResponse.class);
    Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    // Fixed typo in the failure message ("crete" -> "create").
    Assert.fail("Failed to create record");
  }
  try {
    AllocateRequest response = pbRecordFactory.newRecordInstance(AllocateRequest.class);
    Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
}

Class: org.apache.hadoop.yarn.TestResourceTrackerPBClientImpl

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test the method registerNodeManager. Method should return a not null
 * result; server-side YarnExceptions must propagate to the client.
 */
@Test
public void testResourceTrackerPBClientImpl() throws Exception {
  RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
  assertNotNull(client.registerNodeManager(request));

  // Flip the test stub into failure mode and expect the exception to surface.
  ResourceTrackerTestImpl.exception = true;
  try {
    client.registerNodeManager(request);
    fail("there should be YarnException");
  } catch (YarnException e) {
    assertTrue(e.getMessage().startsWith("testMessage"));
  } finally {
    // Restore the stub for subsequent tests.
    ResourceTrackerTestImpl.exception = false;
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test the method nodeHeartbeat. Method should return a not null result;
 * server-side YarnExceptions must propagate to the client.
 */
@Test
public void testNodeHeartbeat() throws Exception {
  NodeHeartbeatRequest request = recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
  assertNotNull(client.nodeHeartbeat(request));

  // Flip the test stub into failure mode and expect the exception to surface.
  ResourceTrackerTestImpl.exception = true;
  try {
    client.nodeHeartbeat(request);
    fail("there should be YarnException");
  } catch (YarnException e) {
    assertTrue(e.getMessage().startsWith("testMessage"));
  } finally {
    // Restore the stub for subsequent tests.
    ResourceTrackerTestImpl.exception = false;
  }
}

Class: org.apache.hadoop.yarn.TestRpcFactoryProvider

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * RpcFactoryProvider: the default factories are the PB implementations;
 * unknown or missing factory classes raise YarnRuntimeException.
 */
@Test
public void testFactoryProvider() {
  Configuration conf = new Configuration();
  RpcClientFactory clientFactory = null;
  RpcServerFactory serverFactory = null;

  // Defaults resolve to the protobuf factory implementations.
  clientFactory = RpcFactoryProvider.getClientFactory(conf);
  serverFactory = RpcFactoryProvider.getServerFactory(conf);
  Assert.assertEquals(RpcClientFactoryPBImpl.class, clientFactory.getClass());
  Assert.assertEquals(RpcServerFactoryPBImpl.class, serverFactory.getClass());

  // An unknown serializer name must fail for both factories.
  conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "unknown");
  conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS, "unknown");
  conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS, "unknown");
  try {
    clientFactory = RpcFactoryProvider.getClientFactory(conf);
    Assert.fail("Expected an exception - unknown serializer");
  } catch (YarnRuntimeException e) {
    // expected
  }
  try {
    serverFactory = RpcFactoryProvider.getServerFactory(conf);
    Assert.fail("Expected an exception - unknown serializer");
  } catch (YarnRuntimeException e) {
    // expected
  }

  // A nonexistent class must fail; a valid explicit class must load.
  conf = new Configuration();
  conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "NonExistantClass");
  conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS, RpcServerFactoryPBImpl.class.getName());
  try {
    clientFactory = RpcFactoryProvider.getClientFactory(conf);
    Assert.fail("Expected an exception - unknown class");
  } catch (YarnRuntimeException e) {
    // expected
  }
  try {
    serverFactory = RpcFactoryProvider.getServerFactory(conf);
  } catch (YarnRuntimeException e) {
    Assert.fail("Error while loading factory using reflection: [" + RpcServerFactoryPBImpl.class.getName() + "]");
  }
}

Class: org.apache.hadoop.yarn.TestYSCRecordFactory

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The PB record factory must return the protobuf implementation class
 * for NodeHeartbeatRequest.
 */
@Test
public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
    Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    // Fixed typo in the failure message ("crete" -> "create").
    Assert.fail("Failed to create record");
  }
}

Class: org.apache.hadoop.yarn.TestYarnServerApiClasses

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test MasterKeyPBImpl: a round-trip through the proto form must preserve
 * the key id, equality, and hash code.
 */
@Test
public void testMasterKeyPBImpl() {
  MasterKeyPBImpl original = new MasterKeyPBImpl();
  original.setBytes(ByteBuffer.allocate(0));
  original.setKeyId(1);

  // Rebuild from the serialized proto.
  MasterKeyPBImpl copy = new MasterKeyPBImpl(original.getProto());
  assertEquals(1, copy.getKeyId());
  assertTrue(original.equals(copy));
  assertEquals(original.hashCode(), copy.hashCode());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test SerializedExceptionPBImpl: message, cause chain, and remote trace
 * must survive a round-trip through the proto form.
 */
@Test
public void testSerializedExceptionPBImpl() {
  // Message-only exception.
  SerializedExceptionPBImpl original = new SerializedExceptionPBImpl();
  original.init("testMessage");
  SerializedExceptionPBImpl copy = new SerializedExceptionPBImpl(original.getProto());
  assertEquals("testMessage", copy.getMessage());

  // Exception with a nested cause.
  original = new SerializedExceptionPBImpl();
  original.init("testMessage", new Throwable(new Throwable("parent")));
  copy = new SerializedExceptionPBImpl(original.getProto());
  assertEquals("testMessage", copy.getMessage());
  assertEquals("parent", copy.getCause().getMessage());
  assertTrue(copy.getRemoteTrace().startsWith("java.lang.Throwable: java.lang.Throwable: parent"));
}

Class: org.apache.hadoop.yarn.api.TestApplicationAttemptId

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contracts of
 * ApplicationAttemptId across varying timestamp, id, and attempt fields.
 */
@Test
public void testApplicationAttemptId() {
  ApplicationAttemptId a1 = createAppAttemptId(10L, 1, 1);
  ApplicationAttemptId a2 = createAppAttemptId(10L, 1, 2);
  ApplicationAttemptId a3 = createAppAttemptId(10L, 2, 1);
  ApplicationAttemptId a4 = createAppAttemptId(8L, 1, 4);
  ApplicationAttemptId a5 = createAppAttemptId(10L, 1, 1);

  // Equality: only the identically constructed id matches.
  Assert.assertTrue(a1.equals(a5));
  Assert.assertFalse(a1.equals(a2));
  Assert.assertFalse(a1.equals(a3));
  Assert.assertFalse(a1.equals(a4));

  // Ordering is consistent with equality.
  Assert.assertTrue(a1.compareTo(a5) == 0);
  Assert.assertTrue(a1.compareTo(a2) < 0);
  Assert.assertTrue(a1.compareTo(a3) < 0);
  Assert.assertTrue(a1.compareTo(a4) > 0);

  // Hash codes follow equality.
  Assert.assertTrue(a1.hashCode() == a5.hashCode());
  Assert.assertFalse(a1.hashCode() == a2.hashCode());
  Assert.assertFalse(a1.hashCode() == a3.hashCode());
  Assert.assertFalse(a1.hashCode() == a4.hashCode());

  // String form: appattempt_<ts>_<zero-padded id>_<zero-padded attempt>.
  long ts = System.currentTimeMillis();
  ApplicationAttemptId a6 = createAppAttemptId(ts, 543627, 33492611);
  Assert.assertEquals("appattempt_10_0001_000001", a1.toString());
  Assert.assertEquals("appattempt_" + ts + "_543627_33492611", a6.toString());
}

Class: org.apache.hadoop.yarn.api.TestApplicationId

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contracts of ApplicationId.
 */
@Test
public void testApplicationId() {
  ApplicationId a1 = ApplicationId.newInstance(10L, 1);
  ApplicationId a2 = ApplicationId.newInstance(10L, 2);
  ApplicationId a3 = ApplicationId.newInstance(10L, 1);
  ApplicationId a4 = ApplicationId.newInstance(8L, 3);

  // Equality: same timestamp and id only.
  Assert.assertFalse(a1.equals(a2));
  Assert.assertFalse(a1.equals(a4));
  Assert.assertTrue(a1.equals(a3));

  // Ordering is consistent with equality.
  Assert.assertTrue(a1.compareTo(a2) < 0);
  Assert.assertTrue(a1.compareTo(a3) == 0);
  Assert.assertTrue(a1.compareTo(a4) > 0);

  // Hash codes follow equality.
  Assert.assertTrue(a1.hashCode() == a3.hashCode());
  Assert.assertFalse(a1.hashCode() == a2.hashCode());
  Assert.assertFalse(a2.hashCode() == a4.hashCode());

  // String form: application_<ts>_<zero-padded id>.
  long ts = System.currentTimeMillis();
  ApplicationId a5 = ApplicationId.newInstance(ts, 45436343);
  Assert.assertEquals("application_10_0001", a1.toString());
  Assert.assertEquals("application_" + ts + "_45436343", a5.toString());
}

Class: org.apache.hadoop.yarn.api.TestApplicatonReport

APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
// Creates three equal ApplicationReports, nulls the application id on one and
// the current attempt id on another, and checks the cleared getters return null.
// NOTE(review): the Assert.assertNotSame calls below only verify the two
// references are distinct objects — trivially true for separately created
// reports, regardless of the mutations. The intent was probably to assert the
// reports are no longer equal() after a field is nulled; confirm before
// changing them to assertFalse(...equals(...)), since that could alter the
// test's outcome.
@Test public void testApplicationReport(){ long timestamp=System.currentTimeMillis(); ApplicationReport appReport1=createApplicationReport(1,1,timestamp); ApplicationReport appReport2=createApplicationReport(1,1,timestamp); ApplicationReport appReport3=createApplicationReport(1,1,timestamp); Assert.assertEquals(appReport1,appReport2); Assert.assertEquals(appReport2,appReport3); appReport1.setApplicationId(null); Assert.assertNull(appReport1.getApplicationId()); Assert.assertNotSame(appReport1,appReport2); appReport2.setCurrentApplicationAttemptId(null); Assert.assertNull(appReport2.getCurrentApplicationAttemptId()); Assert.assertNotSame(appReport2,appReport3); Assert.assertNull(appReport1.getAMRMToken()); }

Class: org.apache.hadoop.yarn.api.TestContainerId

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contracts of ContainerId.
 */
@Test
public void testContainerId() {
  ContainerId c1 = newContainerId(1, 1, 10L, 1);
  ContainerId c2 = newContainerId(1, 1, 10L, 2);
  ContainerId c3 = newContainerId(1, 1, 10L, 1);
  ContainerId c4 = newContainerId(1, 3, 10L, 1);
  ContainerId c5 = newContainerId(1, 3, 8L, 1);

  // Equality: identical fields only.
  Assert.assertTrue(c1.equals(c3));
  Assert.assertFalse(c1.equals(c2));
  Assert.assertFalse(c1.equals(c4));
  Assert.assertFalse(c1.equals(c5));

  // Ordering is consistent with equality.
  Assert.assertTrue(c1.compareTo(c3) == 0);
  Assert.assertTrue(c1.compareTo(c2) < 0);
  Assert.assertTrue(c1.compareTo(c4) < 0);
  Assert.assertTrue(c1.compareTo(c5) > 0);

  // Hash codes follow equality.
  Assert.assertTrue(c1.hashCode() == c3.hashCode());
  Assert.assertFalse(c1.hashCode() == c2.hashCode());
  Assert.assertFalse(c1.hashCode() == c4.hashCode());
  Assert.assertFalse(c1.hashCode() == c5.hashCode());

  // String form: container_<ts>_<app>_<attempt>_<container>.
  long ts = System.currentTimeMillis();
  ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
  Assert.assertEquals("container_10_0001_01_000001", c1.toString());
  Assert.assertEquals("container_" + ts + "_36473_4365472_25645811", c6.toString());
}

Class: org.apache.hadoop.yarn.api.TestContainerResourceIncrease

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a ContainerResourceIncrease through its protobuf form and
 * checks that capability, container id, and token identifier survive.
 */
@Test
public void testResourceIncreaseContext() {
  byte[] identifier = new byte[]{1, 2, 3, 4};
  Token token = Token.newInstance(identifier, "", "".getBytes(), "");
  ContainerId containerId = ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234, 3), 3), 7);
  Resource resource = Resource.newInstance(1023, 3);
  ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance(containerId, resource, token);

  // Serialize to proto and rebuild.
  ContainerResourceIncreaseProto proto = ((ContainerResourceIncreasePBImpl) ctx).getProto();
  ctx = new ContainerResourceIncreasePBImpl(proto);

  // assertEquals(expected, actual): the original had the arguments reversed,
  // producing misleading failure messages.
  Assert.assertEquals(resource, ctx.getCapability());
  Assert.assertEquals(containerId, ctx.getContainerId());
  Assert.assertTrue(Arrays.equals(identifier, ctx.getContainerToken().getIdentifier().array()));
}

Class: org.apache.hadoop.yarn.api.TestNodeId

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the equals/compareTo/hashCode/toString contracts of NodeId
 * across differing host and port combinations.
 */
@Test
public void testNodeId() {
  NodeId nodeId1 = NodeId.newInstance("10.18.52.124", 8041);
  NodeId nodeId2 = NodeId.newInstance("10.18.52.125", 8038);
  NodeId nodeId3 = NodeId.newInstance("10.18.52.124", 8041);
  NodeId nodeId4 = NodeId.newInstance("10.18.52.124", 8039);

  // Equality: same host and port only.
  Assert.assertTrue(nodeId1.equals(nodeId3));
  Assert.assertFalse(nodeId1.equals(nodeId2));
  Assert.assertFalse(nodeId3.equals(nodeId4));

  // Ordering is consistent with equality.
  Assert.assertTrue(nodeId1.compareTo(nodeId3) == 0);
  Assert.assertTrue(nodeId1.compareTo(nodeId2) < 0);
  Assert.assertTrue(nodeId3.compareTo(nodeId4) > 0);

  // Hash codes follow equality.
  Assert.assertTrue(nodeId1.hashCode() == nodeId3.hashCode());
  Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
  Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());

  // String form is host:port.
  Assert.assertEquals("10.18.52.124:8041", nodeId1.toString());
}

Class: org.apache.hadoop.yarn.api.records.impl.pb.TestSerializedExceptionPBImpl

NullVerifier EqualityVerifier HybridVerifier 
/**
 * Accessors on an uninitialized SerializedExceptionPBImpl must match the
 * default (empty) proto: null cause, default proto, default trace.
 */
@Test
public void testBeforeInit() throws Exception {
  SerializedExceptionProto defaultProto = SerializedExceptionProto.newBuilder().build();

  // Each accessor is checked on a fresh, never-initialized instance.
  SerializedExceptionPBImpl pb1 = new SerializedExceptionPBImpl();
  Assert.assertNull(pb1.getCause());

  SerializedExceptionPBImpl pb2 = new SerializedExceptionPBImpl();
  Assert.assertEquals(defaultProto, pb2.getProto());

  SerializedExceptionPBImpl pb3 = new SerializedExceptionPBImpl();
  Assert.assertEquals(defaultProto.getTrace(), pb3.getRemoteTrace());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * deSerialize() before init must throw YarnRuntimeException (caused by
 * ClassNotFoundException); after init it must reproduce the exception.
 */
@Test
public void testDeserialize() throws Exception {
  Exception ex = new Exception("test exception");
  SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();

  // No class name recorded yet, so deserialization cannot resolve one.
  try {
    pb.deSerialize();
    Assert.fail("deSerialze should throw YarnRuntimeException");
  } catch (YarnRuntimeException e) {
    Assert.assertEquals(ClassNotFoundException.class, e.getCause().getClass());
  }

  // After init, the round-tripped exception matches the original.
  pb.init(ex);
  Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
}

Class: org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell

UtilityVerifier BooleanVerifier HybridVerifier 
// Drives Client.init() through every invalid-argument combination — no args,
// missing jar, missing shell command, negative container count, negative
// vcores, both shell_command and shell_script, and neither of them — and
// asserts each is rejected with an IllegalArgumentException carrying the
// expected message fragment. Code left byte-identical: the many embedded
// option strings and expected-message fragments make a restyle risky.
@Test(timeout=90000) public void testDSShellWithInvalidArgs() throws Exception { Client client=new Client(new Configuration(yarnCluster.getConfig())); LOG.info("Initializing DS Client with no args"); try { client.init(new String[]{}); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("No args")); } LOG.info("Initializing DS Client with no jar file"); try { String[] args={"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("No jar")); } LOG.info("Initializing DS Client with no shell command"); try { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--master_memory","512","--container_memory","128"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("No shell command")); } LOG.info("Initializing DS Client with invalid no. of containers"); try { String[] args={"--jar",APPMASTER_JAR,"--num_containers","-1","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("Invalid no. of containers")); } LOG.info("Initializing DS Client with invalid no. of vcores"); try { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? 
"dir" : "ls","--master_memory","512","--master_vcores","-2","--container_memory","128","--container_vcores","1"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("Invalid virtual cores specified")); } LOG.info("Initializing DS Client with --shell_command and --shell_script"); try { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1","--shell_script","test.sh"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("Can not specify shell_command option " + "and shell_script option at the same time")); } LOG.info("Initializing DS Client without --shell_command and --shell_script"); try { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"}; client.init(args); Assert.fail("Exception is expected"); } catch ( IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected",e.getMessage().contains("No shell command or shell script specified " + "to be executed by application master")); } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Writes a custom log4j file setting the root logger to DEBUG, runs the DS
// client with --log_properties, and asserts DEBUG becomes enabled on the
// Client and ApplicationMaster loggers afterwards (it was disabled before),
// and that the containers produced more than 10 DEBUG log lines.
// Left byte-identical: the test depends on mutable global log4j state and
// the exact sequencing of init/run around it, so a restyle is unsafe.
// NOTE(review): the PrintWriter is closed without try/finally — an exception
// while writing would leak the handle; consider try-with-resources.
@Test(timeout=90000) public void testDSShellWithCustomLogPropertyFile() throws Exception { final File basedir=new File("target",TestDistributedShell.class.getName()); final File tmpDir=new File(basedir,"tmpDir"); tmpDir.mkdirs(); final File customLogProperty=new File(tmpDir,"custom_log4j.properties"); if (customLogProperty.exists()) { customLogProperty.delete(); } if (!customLogProperty.createNewFile()) { Assert.fail("Can not create custom log4j property file."); } PrintWriter fileWriter=new PrintWriter(customLogProperty); fileWriter.write("log4j.rootLogger=debug,stdout"); fileWriter.close(); String[] args={"--jar",APPMASTER_JAR,"--num_containers","3","--shell_command","echo","--shell_args","HADOOP","--log_properties",customLogProperty.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"}; final Log LOG_Client=LogFactory.getLog(Client.class); Assert.assertTrue(LOG_Client.isInfoEnabled()); Assert.assertFalse(LOG_Client.isDebugEnabled()); final Log LOG_AM=LogFactory.getLog(ApplicationMaster.class); Assert.assertTrue(LOG_AM.isInfoEnabled()); Assert.assertFalse(LOG_AM.isDebugEnabled()); LOG.info("Initializing DS Client"); final Client client=new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess=client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); boolean result=client.run(); LOG.info("Client run completed. Result=" + result); Assert.assertTrue(verifyContainerLog(3,null,true,"DEBUG") > 10); Assert.assertTrue(LOG_Client.isInfoEnabled()); Assert.assertTrue(LOG_Client.isDebugEnabled()); Assert.assertTrue(LOG_AM.isInfoEnabled()); Assert.assertTrue(LOG_AM.isDebugEnabled()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// End-to-end distributed-shell run: starts the client on a background thread,
// polls YarnClient until the application report shows the local hostname and
// rpc port -1 (10ms sleeps; the poll loop exits on verification or when the
// app reaches FINISHED, bounded overall by the 90s test timeout), then checks
// the run result and the timeline store for 1 DS_APP_ATTEMPT entity with 2
// events and 2 DS_CONTAINER entities.
// Left byte-identical: the thread/poll sequencing and cluster interactions
// are order-dependent, so a restyle is unsafe.
// NOTE(review): the worker thread wraps any client exception in an unjoined
// RuntimeException — a failure inside client.run() surfaces only via
// result.get() being false, not with its original stack trace.
@Test(timeout=90000) public void testDSShell() throws Exception { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"}; LOG.info("Initializing DS Client"); final Client client=new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess=client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); final AtomicBoolean result=new AtomicBoolean(false); Thread t=new Thread(){ public void run(){ try { result.set(client.run()); } catch ( Exception e) { throw new RuntimeException(e); } } } ; t.start(); YarnClient yarnClient=YarnClient.createYarnClient(); yarnClient.init(new Configuration(yarnCluster.getConfig())); yarnClient.start(); String hostName=NetUtils.getHostname(); boolean verified=false; String errorMessage=""; while (!verified) { List apps=yarnClient.getApplications(); if (apps.size() == 0) { Thread.sleep(10); continue; } ApplicationReport appReport=apps.get(0); if (appReport.getHost().equals("N/A")) { Thread.sleep(10); continue; } errorMessage="Expected host name to start with '" + hostName + "', was '"+ appReport.getHost()+ "'. Expected rpc port to be '-1', was '"+ appReport.getRpcPort()+ "'."; if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) { verified=true; } if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) { break; } } Assert.assertTrue(errorMessage,verified); t.join(); LOG.info("Client run completed. 
Result=" + result); Assert.assertTrue(result.get()); TimelineEntities entitiesAttempts=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),null,null,null,null,null,null,null,null); Assert.assertNotNull(entitiesAttempts); Assert.assertEquals(1,entitiesAttempts.getEntities().size()); Assert.assertEquals(2,entitiesAttempts.getEntities().get(0).getEvents().size()); Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString()); TimelineEntities entities=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(),null,null,null,null,null,null,null,null); Assert.assertNotNull(entities); Assert.assertEquals(2,entities.getEntities().size()); Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_CONTAINER.toString()); }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Writes a one-line shell script, runs the DS client with --shell_script
// (no --shell_command), and verifies via verifyContainerLog that the single
// container's log contains the script's echoed marker line.
// Left byte-identical: file-system setup, client lifecycle, and log
// verification are order-dependent, so a restyle is unsafe.
// NOTE(review): as in the log-property test, the PrintWriter is closed
// without try/finally; consider try-with-resources.
@Test(timeout=90000) public void testDSShellWithShellScript() throws Exception { final File basedir=new File("target",TestDistributedShell.class.getName()); final File tmpDir=new File(basedir,"tmpDir"); tmpDir.mkdirs(); final File customShellScript=new File(tmpDir,"custom_script.sh"); if (customShellScript.exists()) { customShellScript.delete(); } if (!customShellScript.createNewFile()) { Assert.fail("Can not create custom shell script file."); } PrintWriter fileWriter=new PrintWriter(customShellScript); fileWriter.write("echo testDSShellWithShellScript"); fileWriter.close(); System.out.println(customShellScript.getAbsolutePath()); String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_script",customShellScript.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"}; LOG.info("Initializing DS Client"); final Client client=new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess=client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); boolean result=client.run(); LOG.info("Client run completed. Result=" + result); List expectedContent=new ArrayList(); expectedContent.add("testDSShellWithShellScript"); verifyContainerLog(1,expectedContent,false,""); }

Class: org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Launching an unmanaged AM whose command exits with failure must make
 * UnmanagedAMLauncher.run() throw a RuntimeException.
 */
@Test(timeout=30000)
public void testUMALauncherError() throws Exception {
  String classpath = getTestRuntimeClasspath();
  String javaHome = System.getenv("JAVA_HOME");
  if (javaHome == null) {
    // Cannot build the java command line without JAVA_HOME; skip the test.
    LOG.fatal("JAVA_HOME not defined. Test not running.");
    return;
  }
  // Run this class itself as the AM, in its deliberate "failure" mode.
  String[] args = {"--classpath", classpath, "--queue", "default", "--cmd", javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName() + " failure"};
  LOG.info("Initializing Launcher");
  UnmanagedAMLauncher launcher = new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess = launcher.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running Launcher");
  try {
    launcher.run();
    fail("Expected an exception to occur as launch should have failed");
  } catch (RuntimeException e) {
    // expected: the AM command exits non-zero
  }
}

Class: org.apache.hadoop.yarn.client.TestApplicationClientProtocolOnHA

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** getNodeReports over HA must return the cluster's fake node reports. */
@Test(timeout=15000)
public void testGetClusterNodesOnHA() throws Exception {
  List reports = client.getNodeReports(NodeState.RUNNING);
  // Split assertions give clearer diagnostics than the original compound
  // assertTrue(reports != null && !reports.isEmpty()).
  Assert.assertNotNull(reports);
  Assert.assertFalse(reports.isEmpty());
  Assert.assertEquals(cluster.createFakeNodeReports(), reports);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Application attempts fetched through the HA client must match the fakes. */
@Test(timeout = 15000)
public void testGetApplicationAttemptsOnHA() throws Exception {
  List attempts = client.getApplicationAttempts(cluster.createFakeAppId());
  Assert.assertTrue(attempts != null && !attempts.isEmpty());
  Assert.assertEquals(cluster.createFakeApplicationAttemptReports(), attempts);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Container reports fetched through the HA client must match the fakes. */
@Test(timeout = 15000)
public void testGetContainersOnHA() throws Exception {
  List containerReports = client.getContainers(cluster.createFakeApplicationAttemptId());
  Assert.assertTrue(containerReports != null && !containerReports.isEmpty());
  Assert.assertEquals(cluster.createFakeContainerReports(), containerReports);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A new application id obtained via the HA client must equal the fake id. */
@Test(timeout = 15000)
public void testGetNewApplicationOnHA() throws Exception {
  ApplicationId newAppId =
      client.createApplication().getApplicationSubmissionContext().getApplicationId();
  Assert.assertTrue(newAppId != null);
  Assert.assertEquals(cluster.createFakeAppId(), newAppId);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Queue info for "root" fetched through the HA client must match the fake. */
@Test(timeout = 15000)
public void testGetQueueInfoOnHA() throws Exception {
  QueueInfo rootQueue = client.getQueueInfo("root");
  Assert.assertTrue(rootQueue != null);
  Assert.assertEquals(cluster.createFakeQueueInfo(), rootQueue);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An attempt report fetched through the HA client must match the fake. */
@Test(timeout = 15000)
public void testGetApplicationAttemptReportOnHA() throws Exception {
  ApplicationAttemptReport attemptReport =
      client.getApplicationAttemptReport(cluster.createFakeApplicationAttemptId());
  Assert.assertTrue(attemptReport != null);
  Assert.assertEquals(cluster.createFakeApplicationAttemptReport(), attemptReport);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An application report fetched through the HA client must match the fake. */
@Test(timeout = 15000)
public void testGetApplicationReportOnHA() throws Exception {
  ApplicationReport appReport = client.getApplicationReport(cluster.createFakeAppId());
  Assert.assertTrue(appReport != null);
  Assert.assertEquals(cluster.createFakeAppReport(), appReport);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A container report fetched through the HA client must match the fake. */
@Test(timeout = 15000)
public void testGetContainerReportOnHA() throws Exception {
  ContainerReport containerReport = client.getContainerReport(cluster.createFakeContainerId());
  Assert.assertTrue(containerReport != null);
  Assert.assertEquals(cluster.createFakeContainerReport(), containerReport);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Queue user ACLs fetched through the HA client must match the fake list. */
@Test(timeout = 15000)
public void testGetQueueUserAclsOnHA() throws Exception {
  List aclInfoList = client.getQueueAclsInfo();
  Assert.assertTrue(aclInfoList != null && !aclInfoList.isEmpty());
  Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(), aclInfoList);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Application reports fetched through the HA client must match the fakes. */
@Test(timeout = 15000)
public void testGetApplicationsOnHA() throws Exception {
  List appReports = client.getApplications();
  Assert.assertTrue(appReports != null && !appReports.isEmpty());
  Assert.assertEquals(cluster.createFakeAppReports(), appReports);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Cluster metrics fetched through the HA client must match the fake metrics. */
@Test(timeout = 15000)
public void testGetClusterMetricsOnHA() throws Exception {
  YarnClusterMetrics metrics = client.getYarnClusterMetrics();
  Assert.assertTrue(metrics != null);
  Assert.assertEquals(cluster.createFakeYarnClusterMetrics(), metrics);
}

Class: org.apache.hadoop.yarn.client.TestClientRMProxy

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The RM delegation-token service string should carry one address per RM:
 * a single entry without HA, and one entry per rm-id once HA is enabled.
 */
@Test
public void testGetRMDelegationTokenService() {
  String defaultRMAddress = YarnConfiguration.DEFAULT_RM_ADDRESS;
  YarnConfiguration conf = new YarnConfiguration();
  // Non-HA: exactly one service address, based on the default RM address.
  Text tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
  String[] serviceAddrs = tokenService.toString().split(",");
  assertEquals(1, serviceAddrs.length);
  for (String addr : serviceAddrs) {
    assertTrue("Incorrect token service name", addr.contains(defaultRMAddress));
  }
  // HA with two RMs bound to the default host: two service addresses.
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
  conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"), "0.0.0.0");
  conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"), "0.0.0.0");
  tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
  serviceAddrs = tokenService.toString().split(",");
  assertEquals(2, serviceAddrs.length);
  for (String addr : serviceAddrs) {
    assertTrue("Incorrect token service name", addr.contains(defaultRMAddress));
  }
}

Class: org.apache.hadoop.yarn.client.TestRMAdminCLI

BooleanVerifier EqualityVerifier HybridVerifier 
/** A refreshQueues failure must make the CLI return -1 and report on stderr. */
@Test(timeout = 500)
public void testException() throws Exception {
  PrintStream originalErr = System.err;
  ByteArrayOutputStream capturedErr = new ByteArrayOutputStream();
  System.setErr(new PrintStream(capturedErr));
  try {
    when(admin.refreshQueues(any(RefreshQueuesRequest.class)))
        .thenThrow(new IOException("test exception"));
    String[] args = {"-refreshQueues"};
    assertEquals(-1, rmAdminCLI.run(args));
    verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
    assertTrue(capturedErr.toString().contains("refreshQueues: test exception"));
  } finally {
    // Always restore the real stderr stream.
    System.setErr(originalErr);
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test printing of help messages */ @Test(timeout=500) public void testHelp() throws Exception { PrintStream oldOutPrintStream=System.out; PrintStream oldErrPrintStream=System.err; ByteArrayOutputStream dataOut=new ByteArrayOutputStream(); ByteArrayOutputStream dataErr=new ByteArrayOutputStream(); System.setOut(new PrintStream(dataOut)); System.setErr(new PrintStream(dataErr)); try { String[] args={"-help"}; assertEquals(0,rmAdminCLI.run(args)); oldOutPrintStream.println(dataOut); assertTrue(dataOut.toString().contains("rmadmin is the command to execute YARN administrative commands.")); assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]]")); assertTrue(dataOut.toString().contains("-refreshQueues: Reload the queues' acls, states and scheduler " + "specific properties.")); assertTrue(dataOut.toString().contains("-refreshNodes: Refresh the hosts information at the " + "ResourceManager.")); assertTrue(dataOut.toString().contains("-refreshUserToGroupsMappings: Refresh user-to-groups mappings")); assertTrue(dataOut.toString().contains("-refreshSuperUserGroupsConfiguration: Refresh superuser proxy" + " groups mappings")); assertTrue(dataOut.toString().contains("-refreshAdminAcls: Refresh acls for administration of " + "ResourceManager")); assertTrue(dataOut.toString().contains("-refreshServiceAcl: Reload the service-level authorization" + " policy file")); assertTrue(dataOut.toString().contains("-help [cmd]: Displays help for the given command or all " + "commands if none")); testError(new String[]{"-help","-refreshQueues"},"Usage: yarn rmadmin [-refreshQueues]",dataErr,0); testError(new String[]{"-help","-refreshNodes"},"Usage: yarn rmadmin [-refreshNodes]",dataErr,0); testError(new String[]{"-help","-refreshUserToGroupsMappings"},"Usage: yarn rmadmin 
[-refreshUserToGroupsMappings]",dataErr,0); testError(new String[]{"-help","-refreshSuperUserGroupsConfiguration"},"Usage: yarn rmadmin [-refreshSuperUserGroupsConfiguration]",dataErr,0); testError(new String[]{"-help","-refreshAdminAcls"},"Usage: yarn rmadmin [-refreshAdminAcls]",dataErr,0); testError(new String[]{"-help","-refreshServiceAcl"},"Usage: yarn rmadmin [-refreshServiceAcl]",dataErr,0); testError(new String[]{"-help","-getGroups"},"Usage: yarn rmadmin [-getGroups [username]]",dataErr,0); testError(new String[]{"-help","-transitionToActive"},"Usage: yarn rmadmin [-transitionToActive " + " [--forceactive]]",dataErr,0); testError(new String[]{"-help","-transitionToStandby"},"Usage: yarn rmadmin [-transitionToStandby ]",dataErr,0); testError(new String[]{"-help","-getServiceState"},"Usage: yarn rmadmin [-getServiceState ]",dataErr,0); testError(new String[]{"-help","-checkHealth"},"Usage: yarn rmadmin [-checkHealth ]",dataErr,0); testError(new String[]{"-help","-failover"},"Usage: yarn rmadmin " + "[-failover [--forcefence] [--forceactive] " + " ]",dataErr,0); testError(new String[]{"-help","-badParameter"},"Usage: yarn rmadmin",dataErr,0); testError(new String[]{"-badParameter"},"badParameter: Unknown command",dataErr,-1); assertEquals(0,rmAdminCLIWithHAEnabled.run(args)); oldOutPrintStream.println(dataOut); assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]] [-transitionToActive "+ " [--forceactive]] [-transitionToStandby ] [-failover"+ " [--forcefence] [--forceactive] ] "+ "[-getServiceState ] [-checkHealth ]")); } finally { System.setOut(oldOutPrintStream); System.setErr(oldErrPrintStream); } }

Class: org.apache.hadoop.yarn.client.TestRMFailover

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Brings up the web-app proxy as a standalone server alongside an HA RM
// pair, then checks the proxied app URL both before and after an explicit
// RM failover; the proxy server is stopped in finally regardless of outcome.
// NOTE(review): the same HttpURLConnection is connect()ed twice; whether the
// second connect() re-issues the request is JDK-dependent — confirm.
@Test public void testWebAppProxyInStandAloneMode() throws YarnException, InterruptedException, IOException { conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); WebAppProxyServer webAppProxyServer=new WebAppProxyServer(); try { conf.set(YarnConfiguration.PROXY_ADDRESS,"0.0.0.0:9099"); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex()); verifyConnections(); webAppProxyServer.init(conf); Assert.assertEquals(STATE.INITED,webAppProxyServer.getServiceState()); webAppProxyServer.start(); Assert.assertEquals(STATE.STARTED,webAppProxyServer.getServiceState()); URL wrongUrl=new URL("http://0.0.0.0:9099/proxy/" + fakeAppId); HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection(); proxyConn.connect(); verifyResponse(proxyConn); explicitFailover(); verifyConnections(); proxyConn.connect(); verifyResponse(proxyConn); } finally { webAppProxyServer.stop(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With two RMs and RM1 active, RM2's web app should issue a Refresh redirect
 * to RM1 for proxied pages but serve local/diagnostic pages itself.
 */
@Test
public void testRMWebAppRedirect() throws YarnException, InterruptedException, IOException {
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  String rm1Url = "http://0.0.0.0:18088";
  String rm2Url = "http://0.0.0.0:28088";
  // Pages that must redirect from the standby RM to the active one.
  String header = getHeader("Refresh", rm2Url);
  assertTrue(header.contains("; url=" + rm1Url));
  header = getHeader("Refresh", rm2Url + "/metrics");
  assertTrue(header.contains("; url=" + rm1Url));
  header = getHeader("Refresh", rm2Url + "/jmx");
  assertTrue(header.contains("; url=" + rm1Url));
  // Pages that are always served locally: no Refresh header expected.
  String[] localPaths = {"/cluster/cluster", "/conf", "/stacks", "/logLevel",
      "/static", "/logs", "/ws/v1/cluster/info"};
  for (String path : localPaths) {
    header = getHeader("Refresh", rm2Url + path);
    assertEquals(null, header);
  }
  // The apps REST endpoint is proxied and must redirect again.
  header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/apps");
  assertTrue(header.contains("; url=" + rm1Url));
}

Class: org.apache.hadoop.yarn.client.TestYarnApiClasses

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test CancelDelegationTokenRequestPBImpl: converts the request to its
 * protobuf form and back, checking the delegation token is preserved.
 */
@Test
public void testCancelDelegationTokenRequestPBImpl() {
  Token token = getDelegationToken();
  CancelDelegationTokenRequestPBImpl request = new CancelDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  CancelDelegationTokenRequestProto proto = request.getProto();
  CancelDelegationTokenRequestPBImpl restored = new CancelDelegationTokenRequestPBImpl(proto);
  assertNotNull(restored.getDelegationToken());
  assertEquals(token, restored.getDelegationToken());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test RenewDelegationTokenRequestPBImpl: converts the request to its
 * protobuf form and back, checking the delegation token is preserved.
 */
@Test
public void testRenewDelegationTokenRequestPBImpl() {
  Token token = getDelegationToken();
  RenewDelegationTokenRequestPBImpl request = new RenewDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  RenewDelegationTokenRequestProto proto = request.getProto();
  RenewDelegationTokenRequestPBImpl restored = new RenewDelegationTokenRequestPBImpl(proto);
  assertNotNull(restored.getDelegationToken());
  assertEquals(token, restored.getDelegationToken());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test Resource request: hashCode, equals and compareTo must agree
 * for identical requests and diverge once the container count changes.
 */
@Test
public void testResourceRequest() {
  Resource resource = recordFactory.newRecordInstance(Resource.class);
  Priority priority = recordFactory.newRecordInstance(Priority.class);
  ResourceRequest original = ResourceRequest.newInstance(priority, "localhost", resource, 2);
  ResourceRequest copy = ResourceRequest.newInstance(priority, "localhost", resource, 2);
  // Identical requests: equal, compare as 0, and hash alike.
  assertTrue(original.equals(copy));
  assertEquals(0, original.compareTo(copy));
  assertTrue(original.hashCode() == copy.hashCode());
  copy.setNumContainers(1);
  // After changing the container count, all three relations must flip.
  assertFalse(original.equals(copy));
  // Fix: assertNotSame(0, compareTo(...)) compared object identity of
  // autoboxed Integers and only "worked" via the Integer cache; assert on
  // the primitive value instead.
  assertTrue(original.compareTo(copy) != 0);
  assertFalse(original.hashCode() == copy.hashCode());
}

Class: org.apache.hadoop.yarn.client.api.async.impl.TestNMClientAsync

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Drives NMClientAsync through 40 successful and 40 failing container
// starts, swapping in fresh mock NMClients mid-stream, then verifies that no
// callback or event-processor errors were recorded and that stop() shuts
// down the dispatcher thread and the thread pool.
// The Thread.sleep(10) loops are polling waits bounded by the @Test timeout.
// NOTE(review): the condition split across the two lines below
// ("containers.size() > / 0") is legal whitespace, not a syntax error.
@Test(timeout=10000) public void testNMClientAsync() throws Exception { Configuration conf=new Configuration(); conf.setInt(YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE,10); int expectedSuccess=40; int expectedFailure=40; asyncClient=new MockNMClientAsync1(expectedSuccess,expectedFailure); asyncClient.init(conf); Assert.assertEquals("The max thread pool size is not correctly set",10,asyncClient.maxThreadPoolSize); asyncClient.start(); for (int i=0; i < expectedSuccess + expectedFailure; ++i) { if (i == expectedSuccess) { while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isAllSuccessCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(1)); } Container container=mockContainer(i); ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container,clc); } while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStartAndQueryFailureCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(2)); ((TestCallbackHandler1)asyncClient.getCallbackHandler()).path=false; for (int i=0; i < expectedFailure; ++i) { Container container=mockContainer(expectedSuccess + expectedFailure + i); ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container,clc); } while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStopFailureCallsExecuted()) { Thread.sleep(10); } for ( String errorMsg : ((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in CallbackHandler",0,((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs.size()); for ( String errorMsg : ((MockNMClientAsync1)asyncClient).errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in ContainerEventProcessor",0,((MockNMClientAsync1)asyncClient).errorMsgs.size()); while (asyncClient.containers.size() > 
0) { Thread.sleep(10); } asyncClient.stop(); Assert.assertFalse("The thread of Container Management Event Dispatcher is still alive",asyncClient.eventDispatcherThread.isAlive()); Assert.assertTrue("The thread pool is not shut down",asyncClient.threadPool.isShutdown()); }

Class: org.apache.hadoop.yarn.client.api.impl.TestAHSClient

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getApplicationAttempts must return the two attempts the mock provides.
 * Fixes: restores the stripped generic type on the report list (element
 * access on a raw List does not compile) and puts assertEquals arguments in
 * (expected, actual) order for accurate failure messages.
 */
@Test(timeout = 10000)
public void testGetApplicationAttempts() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  List<ApplicationAttemptReport> reports = client.getApplicationAttempts(applicationId);
  Assert.assertNotNull(reports);
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 1),
      reports.get(0).getApplicationAttemptId());
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 2),
      reports.get(1).getApplicationAttemptId());
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getApplicationAttemptReport must return the attempt matching the mock's
 * first report. Fixes: restores the stripped generic type on the expected
 * report list and puts assertEquals arguments in (expected, actual) order.
 */
@Test(timeout = 10000)
public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();
  List<ApplicationReport> expectedReports = ((MockAHSClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ApplicationAttemptReport report = client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  Assert.assertEquals(
      expectedReports.get(0).getCurrentApplicationAttemptId().toString(),
      report.getApplicationAttemptId().toString());
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getContainerReport must return the container derived from the mock's first
 * report. Fixes: restores the stripped generic type on the expected report
 * list and puts assertEquals arguments in (expected, actual) order.
 */
@Test(timeout = 10000)
public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();
  List<ApplicationReport> expectedReports = ((MockAHSClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
  ContainerReport report = client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  Assert.assertEquals(
      (ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1))
          .toString(),
      report.getContainerId().toString());
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getContainers must return the two containers the mock provides for the
 * attempt. Fixes: restores the stripped generic type on the report list and
 * puts assertEquals arguments in (expected, actual) order.
 */
@Test(timeout = 10000)
public void testGetContainers() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  List<ContainerReport> reports = client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  Assert.assertEquals((ContainerId.newInstance(appAttemptId, 1)),
      reports.get(0).getContainerId());
  Assert.assertEquals((ContainerId.newInstance(appAttemptId, 2)),
      reports.get(1).getContainerId());
  client.stop();
}

Class: org.apache.hadoop.yarn.client.api.impl.TestAMRMClient

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException { AMRMClient amClient=null; try { amClient=AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); Resource capability1=Resource.newInstance(1024,2); Resource capability2=Resource.newInstance(1024,1); Resource capability3=Resource.newInstance(1000,2); Resource capability4=Resource.newInstance(2000,1); Resource capability5=Resource.newInstance(1000,3); Resource capability6=Resource.newInstance(2000,1); Resource capability7=Resource.newInstance(2000,1); ContainerRequest storedContainer1=new ContainerRequest(capability1,nodes,racks,priority); ContainerRequest storedContainer2=new ContainerRequest(capability2,nodes,racks,priority); ContainerRequest storedContainer3=new ContainerRequest(capability3,nodes,racks,priority); ContainerRequest storedContainer4=new ContainerRequest(capability4,nodes,racks,priority); ContainerRequest storedContainer5=new ContainerRequest(capability5,nodes,racks,priority); ContainerRequest storedContainer6=new ContainerRequest(capability6,nodes,racks,priority); ContainerRequest storedContainer7=new ContainerRequest(capability7,nodes,racks,priority2,false); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); amClient.addContainerRequest(storedContainer4); amClient.addContainerRequest(storedContainer5); amClient.addContainerRequest(storedContainer6); amClient.addContainerRequest(storedContainer7); List> matches; ContainerRequest storedRequest; Resource testCapability1=Resource.newInstance(1024,2); matches=amClient.getMatchingRequests(priority,node,testCapability1); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); amClient.removeContainerRequest(storedContainer1); Resource testCapability2=Resource.newInstance(2000,1); 
matches=amClient.getMatchingRequests(priority,node,testCapability2); verifyMatches(matches,2); int i=0; for ( ContainerRequest storedRequest1 : matches.get(0)) { if (i++ == 0) { assertEquals(storedContainer4,storedRequest1); } else { assertEquals(storedContainer6,storedRequest1); } } amClient.removeContainerRequest(storedContainer6); Resource testCapability3=Resource.newInstance(4000,4); matches=amClient.getMatchingRequests(priority,node,testCapability3); assert (matches.size() == 4); Resource testCapability4=Resource.newInstance(1024,2); matches=amClient.getMatchingRequests(priority,node,testCapability4); assert (matches.size() == 2); for ( Collection testSet : matches) { assertEquals(1,testSet.size()); ContainerRequest testRequest=testSet.iterator().next(); assertTrue(testRequest != storedContainer4); assertTrue(testRequest != storedContainer5); assert (testRequest == storedContainer2 || testRequest == storedContainer3); } Resource testCapability5=Resource.newInstance(512,4); matches=amClient.getMatchingRequests(priority,node,testCapability5); assert (matches.size() == 0); Resource testCapability7=Resource.newInstance(2000,1); matches=amClient.getMatchingRequests(priority2,ResourceRequest.ANY,testCapability7); assert (matches.size() == 0); matches=amClient.getMatchingRequests(priority2,node,testCapability7); assert (matches.size() == 1); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises blacklist handling: a blacklisted node must receive no
// containers, removing it from the blacklist enables allocation again, and
// an invalid (negative-memory) request plus a re-blacklisted node makes
// allocate() fail while the pending blacklist addition is retained.
// NOTE(review): the broad catch(Exception) deliberately treats any failure
// of allocate() as the expected outcome — confirm a narrower type applies.
@Test(timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException { AMRMClientImpl amClient=null; try { amClient=(AMRMClientImpl)AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); assertEquals(0,amClient.ask.size()); assertEquals(0,amClient.release.size()); ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority); amClient.addContainerRequest(storedContainer1); assertEquals(3,amClient.ask.size()); assertEquals(0,amClient.release.size()); List localNodeBlacklist=new ArrayList(); localNodeBlacklist.add(node); amClient.updateBlacklist(localNodeBlacklist,null); int allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION); assertEquals(0,allocatedContainerCount); amClient.updateBlacklist(null,localNodeBlacklist); ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority); amClient.addContainerRequest(storedContainer2); allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION); assertEquals(2,allocatedContainerCount); assertTrue(amClient.blacklistAdditions.isEmpty()); assertTrue(amClient.blacklistRemovals.isEmpty()); ContainerRequest invalidContainerRequest=new ContainerRequest(Resource.newInstance(-1024,1),nodes,racks,priority); amClient.addContainerRequest(invalidContainerRequest); amClient.updateBlacklist(localNodeBlacklist,null); try { amClient.allocate(0.1f); fail("there should be an exception here."); } catch ( Exception e) { assertEquals(1,amClient.blacklistAdditions.size()); } } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies AMRMToken roll-over: waits past the RM's token rolling interval,
// confirms the client picked up a new AMRMToken keyed to the current master
// key, waits for the old key to be retired, and asserts that an RPC
// authenticated with the old token is rejected with InvalidToken.
// The Thread.sleep(1000) loops are polling waits bounded by the @Test timeout.
// NOTE(review): the inner catch of InterruptedException is swallowed without
// re-interrupting the thread — tolerable in a test poll loop, but confirm.
@Test(timeout=60000) public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException { AMRMClient amClient=null; try { AMRMTokenSecretManager amrmTokenSecretManager=yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager(); amClient=AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); Long startTime=System.currentTimeMillis(); amClient.registerApplicationMaster("Host",10000,""); org.apache.hadoop.security.token.Token amrmToken_1=getAMRMToken(); Assert.assertNotNull(amrmToken_1); Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) { amClient.allocate(0.1f); try { Thread.sleep(1000); } catch ( InterruptedException e) { e.printStackTrace(); } } amClient.allocate(0.1f); org.apache.hadoop.security.token.Token amrmToken_2=getAMRMToken(); Assert.assertNotNull(amrmToken_2); Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); Assert.assertNotEquals(amrmToken_1,amrmToken_2); amClient.allocate(0.1f); while (true) { if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) { if (amrmTokenSecretManager.getNextMasterKeyData() == null) { break; } else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) { break; } } amClient.allocate(0.1f); try { Thread.sleep(1000); } catch ( InterruptedException e) { } } try { UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser"); SecurityUtil.setTokenService(amrmToken_2,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress()); testUser.addToken(amrmToken_2); testUser.doAs(new PrivilegedAction(){ @Override public ApplicationMasterProtocol run(){ return 
(ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(),conf); } } ).allocate(Records.newRecord(AllocateRequest.class)); Assert.fail("The old Token should not work"); } catch ( Exception ex) { Assert.assertTrue(ex instanceof InvalidToken); Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId())); } amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException { AMRMClientImpl amClient=null; try { amClient=new AMRMClientImpl(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); Resource capability=Resource.newInstance(1024,2); ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,null,priority); amClient.addContainerRequest(storedContainer1); List> matches; ContainerRequest storedRequest; matches=amClient.getMatchingRequests(priority,node,capability); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); matches=amClient.getMatchingRequests(priority,rack,capability); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); amClient.removeContainerRequest(storedContainer1); matches=amClient.getMatchingRequests(priority,rack,capability); assertTrue(matches.isEmpty()); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Exercises AMRMClient request bookkeeping: remoteRequestsTable counters,
 * getMatchingRequests at node/rack/ANY for two priorities, and verification
 * that containers allocated by the RM match back to their stored requests.
 */
@Test
public void testAMRMClientMatchStorage() throws YarnException, IOException {
  AMRMClientImpl<ContainerRequest> amClient = null;
  try {
    // FIX: restored the <ContainerRequest> type arguments lost in the
    // source; the raw type made the remoteRequestsTable chains and the
    // "List>" declaration below fail to compile.
    amClient = (AMRMClientImpl<ContainerRequest>) AMRMClient.createAMRMClient();
    amClient.init(conf);
    amClient.start();
    amClient.registerApplicationMaster("Host", 10000, "");

    Priority priority1 = Records.newRecord(Priority.class);
    priority1.setPriority(2);

    ContainerRequest storedContainer1 =
        new ContainerRequest(capability, nodes, racks, priority);
    ContainerRequest storedContainer2 =
        new ContainerRequest(capability, nodes, racks, priority);
    ContainerRequest storedContainer3 =
        new ContainerRequest(capability, null, null, priority1);
    amClient.addContainerRequest(storedContainer1);
    amClient.addContainerRequest(storedContainer2);
    amClient.addContainerRequest(storedContainer3);

    // Two requests were registered at 'priority', one at 'priority1'.
    int containersRequestedAny = amClient.remoteRequestsTable.get(priority)
        .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
    assertEquals(2, containersRequestedAny);
    containersRequestedAny = amClient.remoteRequestsTable.get(priority1)
        .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
    assertEquals(1, containersRequestedAny);

    List<? extends Collection<ContainerRequest>> matches =
        amClient.getMatchingRequests(priority, node, capability);
    verifyMatches(matches, 2);
    matches = amClient.getMatchingRequests(priority, rack, capability);
    verifyMatches(matches, 2);
    matches = amClient.getMatchingRequests(priority, ResourceRequest.ANY, capability);
    verifyMatches(matches, 2);
    // storedContainer3 carries no locality, so it only matches at ANY.
    matches = amClient.getMatchingRequests(priority1, rack, capability);
    assertTrue(matches.isEmpty());
    matches = amClient.getMatchingRequests(priority1, ResourceRequest.ANY, capability);
    verifyMatches(matches, 1);

    amClient.removeContainerRequest(storedContainer3);
    matches = amClient.getMatchingRequests(priority, node, capability);
    verifyMatches(matches, 2);
    amClient.removeContainerRequest(storedContainer2);
    matches = amClient.getMatchingRequests(priority, node, capability);
    verifyMatches(matches, 1);
    matches = amClient.getMatchingRequests(priority, rack, capability);
    verifyMatches(matches, 1);

    ContainerRequest storedRequest = matches.get(0).iterator().next();
    assertEquals(storedContainer1, storedRequest);
    amClient.removeContainerRequest(storedContainer1);
    matches = amClient.getMatchingRequests(priority, ResourceRequest.ANY, capability);
    assertTrue(matches.isEmpty());
    matches = amClient.getMatchingRequests(priority1, ResourceRequest.ANY, capability);
    assertTrue(matches.isEmpty());
    assertTrue(amClient.remoteRequestsTable.isEmpty());

    // Re-register two requests and let the RM allocate them.
    amClient.addContainerRequest(storedContainer1);
    amClient.addContainerRequest(storedContainer3);
    int allocatedContainerCount = 0;
    int iterationsLeft = 3;
    while (allocatedContainerCount < 2 && iterationsLeft-- > 0) {
      Log.info(" == alloc " + allocatedContainerCount + " it left " + iterationsLeft);
      AllocateResponse allocResponse = amClient.allocate(0.1f);
      assertEquals(0, amClient.ask.size());
      assertEquals(0, amClient.release.size());
      assertEquals(nodeCount, amClient.getClusterNodeCount());
      allocatedContainerCount += allocResponse.getAllocatedContainers().size();
      for (Container container : allocResponse.getAllocatedContainers()) {
        // Each allocated container must match the stored request of its priority.
        ContainerRequest expectedRequest =
            container.getPriority().equals(storedContainer1.getPriority())
                ? storedContainer1 : storedContainer3;
        matches = amClient.getMatchingRequests(container.getPriority(),
            ResourceRequest.ANY, container.getResource());
        verifyMatches(matches, 1);
        ContainerRequest matchedRequest = matches.get(0).iterator().next();
        assertEquals(matchedRequest, expectedRequest);
        amClient.removeContainerRequest(matchedRequest);
        amClient.releaseAssignedContainer(container.getId());
      }
      if (allocatedContainerCount < containersRequestedAny) {
        sleep(100);
      }
    }

    assertEquals(2, allocatedContainerCount);
    // A final heartbeat must find all requests satisfied and the table empty.
    AllocateResponse allocResponse = amClient.allocate(0.1f);
    assertEquals(0, amClient.release.size());
    assertEquals(0, amClient.ask.size());
    assertEquals(0, allocResponse.getAllocatedContainers().size());
    assertTrue(amClient.remoteRequestsTable.isEmpty());
    amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  } finally {
    if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
      amClient.stop();
    }
  }
}

Class: org.apache.hadoop.yarn.client.api.impl.TestAMRMClientOnRMRestart

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * AMRM token roll-over across RM restart: once the master key rolls, the AM
 * receives a new token, the old token is rejected, and a restarted RM
 * recovers the rolled key from the shared state store.
 */
@Test(timeout = 30000)
public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception {
  conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // Start the first RM and get an AM attempt launched on it.
  MyResourceManager2 rm1 = new MyResourceManager2(conf, memStore);
  rm1.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm1.getRMContext().getDispatcher();
  long startTime = System.currentTimeMillis();  // FIX: primitive long, no boxing
  RMApp app = rm1.submitApp(1024);
  dispatcher.await();
  MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm1.sendAMLaunched(appAttemptId);
  dispatcher.await();

  AMRMTokenSecretManager amrmTokenSecretManagerForRM1 =
      rm1.getRMContext().getAMRMTokenSecretManager();
  // FIX: restored the <AMRMTokenIdentifier> type argument lost in the
  // source; the raw Token made decodeIdentifier().getKeyId() uncompilable.
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
      amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ugi.addTokenIdentifier(token.decodeIdentifier());

  AMRMClient<ContainerRequest> amClient = new MyAMRMClientImpl(rm1);
  amClient.init(conf);
  amClient.start();
  amClient.registerApplicationMaster("h1", 10000, "");
  amClient.allocate(0.1f);

  // Keep heart-beating until the configured rolling interval has elapsed.
  while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
    amClient.allocate(0.1f);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // deliberately ignored: keep polling until the interval elapses
    }
  }
  Assert.assertTrue(amrmTokenSecretManagerForRM1.getMasterKey().getMasterKey()
      .getKeyId() != token.decodeIdentifier().getKeyId());

  amClient.allocate(0.1f);
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newToken =
      amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);

  // Bounded wait for the current master key to move past the old token's key.
  // (getCurrnetMasterKeyData is the misspelled upstream API name.)
  int waitCount = 0;
  while (waitCount++ <= 50) {
    if (amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey()
        .getKeyId() != token.decodeIdentifier().getKeyId()) {
      break;
    }
    try {
      amClient.allocate(0.1f);
    } catch (Exception ex) {
      break;
    }
    Thread.sleep(500);
  }
  Assert.assertTrue(amrmTokenSecretManagerForRM1.getNextMasterKeyData() == null);
  Assert.assertTrue(amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData()
      .getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());

  // Restart a second RM from the same state store.
  conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:9030");
  final MyResourceManager2 rm2 = new MyResourceManager2(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  ((MyAMRMClientImpl) amClient).updateRMProxy(rm2);
  dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();

  AMRMTokenSecretManager amrmTokenSecretManagerForRM2 =
      rm2.getRMContext().getAMRMTokenSecretManager();
  Assert.assertTrue(amrmTokenSecretManagerForRM2.getCurrnetMasterKeyData()
      .getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
  Assert.assertTrue(amrmTokenSecretManagerForRM2.getNextMasterKeyData() == null);

  // The old token must be rejected by the restarted RM.
  try {
    UserGroupInformation testUser = UserGroupInformation.createRemoteUser("testUser");
    SecurityUtil.setTokenService(token, rm2.getApplicationMasterService().getBindAddress());
    testUser.addToken(token);
    testUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
      @Override
      public ApplicationMasterProtocol run() {
        return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(
            ApplicationMasterProtocol.class,
            rm2.getApplicationMasterService().getBindAddress(), conf);
      }
    }).allocate(Records.newRecord(AllocateRequest.class));
    Assert.fail("The old Token should not work");
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof InvalidToken);
    Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from "
        + token.decodeIdentifier().getApplicationAttemptId()));
  }

  amClient.allocate(0.1f);
  amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  amClient.stop();
  rm1.stop();
  rm2.stop();
}

Class: org.apache.hadoop.yarn.client.api.impl.TestNMClient

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stopping the NM client without cleanup must leave the started containers
 * tracked; an explicit cleanup afterwards removes them all.
 */
@Test(timeout = 180000)
public void testNMClientNoCleanupOnStop() throws YarnException, IOException {
  rmClient.registerApplicationMaster("Host", 10000, "");
  testContainerManagement(nmClient, allocateContainers(rmClient, 5));
  rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  // Stop with cleanup disabled: containers remain registered.
  stopNmClient(false);
  assertFalse(nmClient.startedContainers.isEmpty());
  // Explicit cleanup then empties the tracking set.
  nmClient.cleanupRunningContainers();
  assertEquals(0, nmClient.startedContainers.size());
}

BranchVerifier TestInitializer UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Starts a MiniYARNCluster, submits an unmanaged AM application, waits for
 * its attempt to be LAUNCHED, then builds AMRM and NM clients that share a
 * single NMTokenCache.
 */
@Before
public void setup() throws YarnException, IOException {
  conf = new YarnConfiguration();
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();
  assertNotNull(yarnCluster);
  assertEquals(STATE.STARTED, yarnCluster.getServiceState());

  yarnClient = (YarnClientImpl) YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  assertNotNull(yarnClient);
  assertEquals(STATE.STARTED, yarnClient.getServiceState());
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);

  // Build and submit an unmanaged-AM application.
  ApplicationSubmissionContext appContext =
      yarnClient.createApplication().getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();
  appContext.setApplicationName("Test");
  Priority pri = Priority.newInstance(0);
  appContext.setPriority(pri);
  appContext.setQueue("default");
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  appContext.setUnmanagedAM(true);
  // FIX: removed a SubmitApplicationRequest that was built here but never
  // used -- submitApplication() takes the context directly.
  yarnClient.submitApplication(appContext);

  // Wait (bounded) for ACCEPTED, then for the attempt to reach LAUNCHED.
  int iterationsLeft = 30;
  RMAppAttempt appAttempt = null;
  while (iterationsLeft > 0) {
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
      attemptId = appReport.getCurrentApplicationAttemptId();
      appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
          .get(attemptId.getApplicationId()).getCurrentAppAttempt();
      while (true) {
        if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
          break;
        }
        // FIX: sleep between polls instead of hot-spinning on the CPU.
        sleep(100);
      }
      break;
    }
    sleep(1000);
    --iterationsLeft;
  }
  if (iterationsLeft == 0) {
    // FIX: corrected typo "bee" -> "been" in the failure message.
    fail("Application hasn't been started");
  }

  // Log in as the current user and attach the attempt's AMRM token.
  UserGroupInformation.setLoginUser(UserGroupInformation
      .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
  UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());

  // The AMRM client and the NM client share one NMTokenCache.
  nmTokenCache = new NMTokenCache();
  rmClient = (AMRMClientImpl) AMRMClient.createAMRMClient();
  rmClient.setNMTokenCache(nmTokenCache);
  rmClient.init(conf);
  rmClient.start();
  assertNotNull(rmClient);
  assertEquals(STATE.STARTED, rmClient.getServiceState());

  nmClient = (NMClientImpl) NMClient.createNMClient();
  nmClient.setNMTokenCache(rmClient.getNMTokenCache());
  nmClient.init(conf);
  nmClient.start();
  assertNotNull(nmClient);
  assertEquals(STATE.STARTED, nmClient.getServiceState());
}

Class: org.apache.hadoop.yarn.client.api.impl.TestTimelineClient

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** A successful put of one entity must yield a response with zero errors. */
@Test
public void testPostEntities() throws Exception {
  mockClientResponse(client, ClientResponse.Status.OK, false, false);
  try {
    TimelinePutResponse putResponse = client.putEntities(generateEntity());
    Assert.assertEquals(0, putResponse.getErrors().size());
  } catch (YarnException e) {
    Assert.fail("Exception is not expected");
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** A put rejected by the server reports exactly one IO_EXCEPTION error. */
@Test
public void testPostEntitiesWithError() throws Exception {
  mockClientResponse(client, ClientResponse.Status.OK, true, false);
  try {
    TimelinePutResponse putResponse = client.putEntities(generateEntity());
    Assert.assertEquals(1, putResponse.getErrors().size());
    // All error fields of the single entry must match the canned values.
    Assert.assertEquals("test entity id", putResponse.getErrors().get(0).getEntityId());
    Assert.assertEquals("test entity type", putResponse.getErrors().get(0).getEntityType());
    Assert.assertEquals(TimelinePutResponse.TimelinePutError.IO_EXCEPTION,
        putResponse.getErrors().get(0).getErrorCode());
  } catch (YarnException e) {
    Assert.fail("Exception is not expected");
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With the timeline-service flag unset (default off), putEntities is a
 * silent no-op even when the mocked server would return a 500.
 */
@Test
public void testPostEntitiesTimelineServiceDefaultNotEnabled() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.unset(YarnConfiguration.TIMELINE_SERVICE_ENABLED);
  TimelineClientImpl client = createTimelineClient(conf);
  mockClientResponse(client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
  try {
    TimelinePutResponse putResponse = client.putEntities(generateEntity());
    Assert.assertEquals(0, putResponse.getErrors().size());
  } catch (YarnException e) {
    Assert.fail("putEntities should already return before throwing the exception");
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With the timeline service explicitly disabled, putEntities returns an
 * empty response without ever touching the (erroring) server.
 */
@Test
public void testPostEntitiesTimelineServiceNotEnabled() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
  TimelineClientImpl client = createTimelineClient(conf);
  mockClientResponse(client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
  try {
    TimelinePutResponse putResponse = client.putEntities(generateEntity());
    Assert.assertEquals(0, putResponse.getErrors().size());
  } catch (YarnException e) {
    Assert.fail("putEntities should already return before throwing the exception");
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** A connection-refused failure must surface as a ClientHandlerException. */
@Test
public void testPostEntitiesConnectionRefused() throws Exception {
  mockClientResponse(client, null, false, true);
  try {
    client.putEntities(generateEntity());
    Assert.fail("RuntimeException is expected");
  } catch (RuntimeException re) {
    Assert.assertTrue(re instanceof ClientHandlerException);
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/** A 500 from the server must surface as a YarnException with a clear message. */
@Test
public void testPostEntitiesNoResponse() throws Exception {
  mockClientResponse(client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
  try {
    client.putEntities(generateEntity());
    Assert.fail("Exception is expected");
  } catch (YarnException e) {
    Assert.assertTrue(e.getMessage()
        .contains("Failed to get the response from the timeline server."));
  }
}

Class: org.apache.hadoop.yarn.client.api.impl.TestYarnClient

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getApplicationAttempts on the mock client returns the two canned attempts
 * for the requested application, in order.
 */
@Test(timeout = 10000)
public void testGetApplicationAttempts() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  // FIX: restored the list's element type lost in the source; the raw List
  // made the getApplicationAttemptId() calls below uncompilable.
  List<ApplicationAttemptReport> reports = client.getApplicationAttempts(applicationId);
  Assert.assertNotNull(reports);
  // FIX: expected value first, per JUnit convention.
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 1),
      reports.get(0).getApplicationAttemptId());
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 2),
      reports.get(1).getApplicationAttemptId());
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getContainers on the mock client returns the two canned containers for the
 * requested application attempt, in order.
 */
@Test(timeout = 10000)
public void testGetContainers() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  // FIX: restored the list's element type lost in the source; the raw List
  // made the getContainerId() calls below uncompilable.
  List<ContainerReport> reports = client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  // FIX: expected value first, per JUnit convention.
  Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
      reports.get(0).getContainerId());
  Assert.assertEquals(ContainerId.newInstance(appAttemptId, 2),
      reports.get(1).getContainerId());
  client.stop();
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/**
 * AMRM tokens are only exposed for unmanaged AMs, and only to the user that
 * owns the application -- another user's token stays invisible.
 */
@Test(timeout = 30000)
public void testAMMRTokens() throws Exception {
  MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1);
  YarnClient rmClient = null;
  try {
    cluster.init(new YarnConfiguration());
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    rmClient = YarnClient.createYarnClient();
    rmClient.init(yarnConf);
    rmClient.start();

    // Managed AM: no AMRM token is exposed through the client.
    ApplicationId appId = createApp(rmClient, false);
    waitTillAccepted(rmClient, appId);
    Assert.assertNull(rmClient.getAMRMToken(appId));

    // Unmanaged AM: the token must appear within 20 seconds.
    appId = createApp(rmClient, true);
    waitTillAccepted(rmClient, appId);
    long start = System.currentTimeMillis();
    while (rmClient.getAMRMToken(appId) == null) {
      if (System.currentTimeMillis() - start > 20 * 1000) {
        Assert.fail("AMRM token is null");
      }
      Thread.sleep(100);
    }
    Assert.assertNotNull(rmClient.getAMRMToken(appId));

    // FIX: restored the <ApplicationId> type argument lost in the source;
    // the raw PrivilegedExceptionAction made doAs return Object, breaking
    // the assignment to appId.
    UserGroupInformation other =
        UserGroupInformation.createUserForTesting("foo", new String[]{});
    appId = other.doAs(new PrivilegedExceptionAction<ApplicationId>() {
      @Override
      public ApplicationId run() throws Exception {
        YarnClient rmClient = YarnClient.createYarnClient();
        rmClient.init(yarnConf);
        rmClient.start();
        ApplicationId appId = createApp(rmClient, true);
        waitTillAccepted(rmClient, appId);
        long start = System.currentTimeMillis();
        while (rmClient.getAMRMToken(appId) == null) {
          if (System.currentTimeMillis() - start > 20 * 1000) {
            Assert.fail("AMRM token is null");
          }
          Thread.sleep(100);
        }
        Assert.assertNotNull(rmClient.getAMRMToken(appId));
        return appId;
      }
    });
    // The other user's token is not visible through this client.
    Assert.assertNull(rmClient.getAMRMToken(appId));
  } finally {
    if (rmClient != null) {
      rmClient.stop();
    }
    cluster.stop();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getApplications filtering: no filter, by application type, by state, and
 * by both together, validated against the mock client's canned reports.
 */
@Test(timeout = 10000)
public void testGetApplications() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  // FIX: restored the element types lost in the source (raw List made the
  // getApplicationType() calls uncompilable); also put expected values
  // first in assertEquals, per JUnit convention.
  List<ApplicationReport> expectedReports = ((MockYarnClient) client).getReports();
  List<ApplicationReport> reports = client.getApplications();
  Assert.assertEquals(expectedReports, reports);

  // Filter by application type.
  Set<String> appTypes = new HashSet<String>();
  appTypes.add("YARN");
  appTypes.add("NON-YARN");
  reports = client.getApplications(appTypes, null);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue(
      (reports.get(0).getApplicationType().equals("YARN")
          && reports.get(1).getApplicationType().equals("NON-YARN"))
      || (reports.get(1).getApplicationType().equals("YARN")
          && reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }

  // Filter by application state.
  EnumSet<YarnApplicationState> appStates = EnumSet.noneOf(YarnApplicationState.class);
  appStates.add(YarnApplicationState.FINISHED);
  appStates.add(YarnApplicationState.FAILED);
  reports = client.getApplications(null, appStates);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue(
      (reports.get(0).getApplicationType().equals("NON-YARN")
          && reports.get(1).getApplicationType().equals("NON-MAPREDUCE"))
      || (reports.get(1).getApplicationType().equals("NON-YARN")
          && reports.get(0).getApplicationType().equals("NON-MAPREDUCE")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }

  // Filter by both type and state: only one report qualifies.
  reports = client.getApplications(appTypes, appStates);
  Assert.assertEquals(1, reports.size());
  Assert.assertTrue(reports.get(0).getApplicationType().equals("NON-YARN"));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  client.stop();
}

IterativeVerifier UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * submitApplication: a context without an ApplicationId is rejected with
 * ApplicationIdNotProvidedException, and for each terminal/advancing state
 * the client polls the report the expected cumulative number of times.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testSubmitApplication() {
  Configuration conf = new Configuration();
  conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS, 100);
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  YarnApplicationState[] exitStates = new YarnApplicationState[]{
      YarnApplicationState.SUBMITTED, YarnApplicationState.ACCEPTED,
      YarnApplicationState.RUNNING, YarnApplicationState.FINISHED,
      YarnApplicationState.FAILED, YarnApplicationState.KILLED};

  // A context without an application id must fail fast.
  ApplicationSubmissionContext contextWithoutApplicationId =
      mock(ApplicationSubmissionContext.class);
  try {
    client.submitApplication(contextWithoutApplicationId);
    Assert.fail("Should throw the ApplicationIdNotProvidedException");
  } catch (YarnException e) {
    Assert.assertTrue(e instanceof ApplicationIdNotProvidedException);
    Assert.assertTrue(e.getMessage()
        .contains("ApplicationId is not provided in ApplicationSubmissionContext"));
  } catch (IOException e) {
    Assert.fail("IOException is not expected.");
  }

  // For each exit state the submission succeeds; the mock report has been
  // polled a cumulative 4*(i+1) times by then.
  for (int i = 0; i < exitStates.length; ++i) {
    ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class);
    ApplicationId applicationId =
        ApplicationId.newInstance(System.currentTimeMillis(), i);
    when(context.getApplicationId()).thenReturn(applicationId);
    ((MockYarnClient) client).setYarnApplicationState(exitStates[i]);
    try {
      client.submitApplication(context);
    } catch (YarnException e) {
      Assert.fail("Exception is not expected.");
    } catch (IOException e) {
      Assert.fail("Exception is not expected.");
    }
    verify(((MockYarnClient) client).mockReport, times(4 * i + 4))
        .getYarnApplicationState();
  }
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getApplicationAttemptReport returns the attempt whose id matches the first
 * canned report's current attempt.
 */
@Test(timeout = 10000)
public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  // FIX: restored the <ApplicationReport> element type lost in the source;
  // the raw List made getCurrentApplicationAttemptId() uncompilable.
  List<ApplicationReport> expectedReports = ((MockYarnClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ApplicationAttemptReport report = client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  // FIX: expected value first, per JUnit convention.
  Assert.assertEquals(
      expectedReports.get(0).getCurrentApplicationAttemptId().toString(),
      report.getApplicationAttemptId().toString());
  client.stop();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getContainerReport returns the container whose id derives from the first
 * canned report's current attempt.
 */
@Test(timeout = 10000)
public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  // FIX: restored the <ApplicationReport> element type lost in the source;
  // the raw List made getCurrentApplicationAttemptId() uncompilable.
  List<ApplicationReport> expectedReports = ((MockYarnClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
  ContainerReport report = client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  // FIX: expected value first, per JUnit convention.
  Assert.assertEquals(
      ContainerId.newInstance(
          expectedReports.get(0).getCurrentApplicationAttemptId(), 1).toString(),
      report.getContainerId().toString());
  client.stop();
}

Class: org.apache.hadoop.yarn.client.cli.TestLogsCLI

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Running the logs CLI with no arguments exits with -1 and prints exactly
 * the expected usage text.
 * NOTE(review): the option placeholders in these literals may have lost
 * angle-bracketed text during extraction -- verify against the real CLI
 * output before relying on them.
 */
@Test(timeout = 5000L)
public void testHelpMessage() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI dumper = new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);

  int exitCode = dumper.run(new String[]{});
  assertTrue(exitCode == -1);

  // Rebuild the expected help text line by line.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Retrieve logs for completed YARN applications.");
  pw.println("usage: yarn logs -applicationId [OPTIONS]");
  pw.println();
  pw.println("general options are:");
  pw.println(" -appOwner AppOwner (assumed to be current user if");
  pw.println(" not specified)");
  pw.println(" -containerId ContainerId (must be specified if node");
  pw.println(" address is specified)");
  pw.println(" -nodeAddress NodeAddress in the format nodename:port");
  pw.println(" (must be specified if container id is");
  pw.println(" specified)");
  pw.close();

  String appReportStr = baos.toString("UTF-8");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
}

Class: org.apache.hadoop.yarn.client.cli.TestYarnCLI

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * "application -help" and malformed application commands (extra trailing
 * argument) must all print the CLI usage text.
 */
@Test(timeout = 10000)
public void testAppsHelpCommand() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationCLI spyCli = spy(cli);
  int result = spyCli.run(new String[]{"application", "-help"});
  Assert.assertTrue(result == 0);
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -kill: usage again.
  sysOutStream.reset();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  result = cli.run(new String[]{"application", "-kill", applicationId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -status: usage again.
  sysOutStream.reset();
  NodeId nodeId = NodeId.newInstance("host0", 0);
  result = cli.run(new String[]{"application", "-status", nodeId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(), sysOutStream.toString());
}

UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test public void testKillApplication() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2); int result=cli.run(new String[]{"application","-kill",applicationId.toString()}); assertEquals(0,result); verify(client,times(0)).killApplication(any(ApplicationId.class)); verify(sysOut).println("Application " + applicationId + " has already finished "); ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport); result=cli.run(new String[]{"application","-kill",applicationId.toString()}); assertEquals(0,result); verify(client).killApplication(any(ApplicationId.class)); verify(sysOut).println("Killing application application_1234_0005"); doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).getApplicationReport(applicationId); cli=createAndGetAppCLI(); try { int exitCode=cli.run(new String[]{"application","-kill",applicationId.toString()}); verify(sysOut).println("Application with id '" + applicationId + "' doesn't exist in RM."); Assert.assertNotSame("should return non-zero exit code.",0,exitCode); } catch ( ApplicationNotFoundException appEx) { Assert.fail("application -kill should not throw" + "ApplicationNotFoundException. 
" + appEx); } catch ( Exception e) { Assert.fail("Unexpected exception: " + e); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * "container -help" and malformed container commands (extra trailing
 * argument) must all print the container CLI usage text.
 */
@Test(timeout = 10000)
public void testContainersHelpCommand() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationCLI spyCli = spy(cli);
  int result = spyCli.run(new String[]{"container", "-help"});
  Assert.assertTrue(result == 0);
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -list: usage again.
  sysOutStream.reset();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 6);
  result = cli.run(new String[]{"container", "-list", appAttemptId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -status: usage again.
  sysOutStream.reset();
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 7);
  result = cli.run(new String[]{"container", "-status", containerId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(), sysOutStream.toString());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An ApplicationNotFoundException thrown by getApplicationReport must
 * propagate out of "application -status" unchanged.
 */
@Test
public void testGetApplicationReportException() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  when(client.getApplicationReport(any(ApplicationId.class)))
      .thenThrow(new ApplicationNotFoundException(
          "History file for application" + applicationId + " is not found"));
  try {
    cli.run(new String[]{"application", "-status", applicationId.toString()});
    Assert.fail();
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof ApplicationNotFoundException);
    Assert.assertEquals(
        "History file for application" + applicationId + " is not found",
        ex.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * "applicationattempt -help" and malformed attempt commands (extra trailing
 * argument) must all print the attempt CLI usage text.
 */
@Test(timeout = 10000)
public void testAppAttemptsHelpCommand() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationCLI spyCli = spy(cli);
  int result = spyCli.run(new String[]{"applicationattempt", "-help"});
  Assert.assertTrue(result == 0);
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationAttemptCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -list: usage again.
  sysOutStream.reset();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  result = cli.run(new String[]{"applicationattempt", "-list", applicationId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationAttemptCLIHelpMessage(), sysOutStream.toString());

  // Trailing argument after -status: usage again.
  sysOutStream.reset();
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 6);
  result = cli.run(new String[]{"applicationattempt", "-status", appAttemptId.toString(), "args"});
  verify(spyCli).printUsage(any(String.class), any(Options.class));
  Assert.assertEquals(createApplicationAttemptCLIHelpMessage(), sysOutStream.toString());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Covers "application -movetoqueue" for three cases: a FINISHED app (the
 * move is skipped), a RUNNING app (the move RPC fires and success is
 * printed), and a vanished app (ApplicationNotFoundException propagates).
 */
@Test public void testMoveApplicationAcrossQueues() throws Exception {
  ApplicationCLI cli=createAndGetAppCLI();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  // Case 1: FINISHED application -- CLI prints a notice and must NOT call
  // the move RPC (verified with times(0) below).
  ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
  when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2);
  int result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
  assertEquals(0,result);
  verify(client,times(0)).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
  verify(sysOut).println("Application " + applicationId + " has already finished ");
  // Case 2: RUNNING application -- the move goes through and both progress
  // and success lines are printed.
  ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
  when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport);
  result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
  assertEquals(0,result);
  verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
  verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue");
  verify(sysOut).println("Successfully completed move.");
  // Case 3: the move RPC itself reports the app missing -- the exception
  // must reach the caller with its message intact.
  doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).moveApplicationAcrossQueues(applicationId,"targetqueue");
  cli=createAndGetAppCLI();
  try {
    result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
    Assert.fail();
  } catch ( Exception ex) {
    Assert.assertTrue(ex instanceof ApplicationNotFoundException);
    Assert.assertEquals("Application with id '" + applicationId + "' doesn't exist in RM.",ex.getMessage());
  }
}

Class: org.apache.hadoop.yarn.conf.TestHAUtil

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises HAUtil.verifyAndSetConfiguration over a matrix of HA configs:
 * a valid config, a single-RM id list, a missing RM_HA_ID, an RM_HA_ID not
 * present in RM_HA_IDS, missing per-RM addresses, and untrimmed values.
 * Each invalid case checks the exact YarnRuntimeException message.
 *
 * NOTE(review): several try/catch sections have no Assert.fail() after
 * verifyAndSetConfiguration, so they pass silently if no exception is
 * thrown -- presumably intentional for the untrimmed-value case, but
 * worth confirming for the others.
 */
@Test public void testVerifyAndSetConfiguration() throws Exception {
  // A fully valid HA config must not throw, and values must be trimmed.
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch ( YarnRuntimeException e) {
    fail("Should not throw any exceptions.");
  }
  assertEquals("Should be saved as Trimmed collection",StringUtils.getStringCollection(RM_NODE_IDS),HAUtil.getRMHAIds(conf));
  assertEquals("Should be saved as Trimmed string",RM1_NODE_ID,HAUtil.getRMHAId(conf));
  for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    assertEquals("RPC address not set for " + confKey,RM1_ADDRESS,conf.get(confKey));
  }
  // A single RM id is invalid: HA requires at least two RMs.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID);
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch ( YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by verifyAndSetRMHAIds()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,conf.get(YarnConfiguration.RM_HA_IDS) + "\nHA mode requires atleast two RMs"),e.getMessage());
  }
  // RM_HA_ID unset: getRMId() must complain that the value is needed.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
  for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS);
    conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch ( YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by getRMId()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID),e.getMessage());
  }
  // An RM_HA_ID containing illegal characters must be rejected by addSuffix().
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_IDS,RM_INVALID_NODE_ID + "," + RM1_NODE_ID);
  for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(confKey + RM_INVALID_NODE_ID,RM_INVALID_NODE_ID);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch ( YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by addSuffix()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID),e.getMessage());
  }
  // Valid ids but no per-RM address keys: Configuration#set must fail with a
  // "need to set hostname or address" message. This case DOES assert fail().
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("Should throw YarnRuntimeException. by Configuration#set()");
  } catch ( YarnRuntimeException e) {
    String confKey=HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,RM1_NODE_ID);
    assertEquals("YarnRuntimeException by Configuration#set()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,RM1_NODE_ID) + " or " + confKey),e.getMessage());
  }
  // RM_HA_ID (untrimmed "rm1") not contained in RM_HA_IDS {rm2, rm3}:
  // getRMId()'s membership validation must reject it.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM2_NODE_ID + "," + RM3_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID_UNTRIMMED);
  for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS_UNTRIMMED);
    conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
    conf.set(HAUtil.addSuffix(confKey,RM3_NODE_ID),RM3_ADDRESS);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch ( YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by getRMId()'s validation",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getRMHAIdNeedToBeIncludedMessage("[rm2, rm3]",RM1_NODE_ID),e.getMessage());
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * HAUtil.getRMHAId must echo the configured RM_HA_ID verbatim and return
 * null once the key is cleared.
 */
@Test
public void testGetRMId() throws Exception {
  final String haIdKey = YarnConfiguration.RM_HA_ID;
  conf.set(haIdKey, RM1_NODE_ID);
  assertEquals("Does not honor " + haIdKey, RM1_NODE_ID, HAUtil.getRMHAId(conf));

  conf.clear();
  assertNull("Return null when " + haIdKey + " is not set",
      HAUtil.getRMHAId(conf));
}

Class: org.apache.hadoop.yarn.conf.TestYarnConfiguration

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getSocketAddr must honor an explicitly configured NM address even when RM
 * HA is enabled (HA id suffixing must not leak into NM keys).
 */
@Test
public void testGetSocketAddressForNMWithHA() {
  YarnConfiguration conf = new YarnConfiguration();
  // Pin the NM port and switch on HA with an active RM id.
  conf.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234");
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_ID, "rm1");
  assertTrue(HAUtil.isHAEnabled(conf));

  InetSocketAddress nmAddr = conf.getSocketAddr(
      YarnConfiguration.NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_PORT);
  assertEquals(1234, nmAddr.getPort());
}

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * When yarn.resourcemanager.webapp.address is set explicitly, the computed
 * RM web URL must use that host:port, not one derived from RM_ADDRESS.
 */
@Test
public void testRMWebUrlSpecified() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
  conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
  String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
  String[] parts = rmWebUrl.split(":");
  // FIX: corrected "incrrect" typo in the assertion message and replaced
  // Integer.valueOf(...).intValue() with the unboxed parseInt.
  Assert.assertEquals("RM Web URL Port is incorrect", 24543,
      Integer.parseInt(parts[parts.length - 1]));
  Assert.assertNotSame(
      "RM Web Url not resolved correctly. Should not be rmtesting",
      "http://rmtesting:24543", rmWebUrl);
}

Class: org.apache.hadoop.yarn.lib.TestZKClient

TestCleaner APIUtilityVerifier BranchVerifier BooleanVerifier HybridVerifier 
/**
 * Shuts down the in-process ZooKeeper server started by setUp() and waits
 * until its client port stops answering.
 */
@After
public void tearDown() throws IOException, InterruptedException {
  if (zks == null) {
    return; // no server was started; nothing to clean up
  }
  ZKDatabase database = zks.getZKDatabase();
  factory.shutdown();
  try {
    database.close();
  } catch (IOException ignored) {
    // best-effort close; the server is already shut down
  }
  final int port = Integer.parseInt(hostPort.split(":")[1]);
  Assert.assertTrue("waiting for server down",
      waitForServerDown("127.0.0.1:" + port, CONNECTION_TIMEOUT));
}

APIUtilityVerifier TestInitializer BooleanVerifier HybridVerifier 
/**
 * Boots a single in-process ZooKeeper server on the port parsed from
 * {@code hostPort} and blocks until it accepts connections.
 */
@Before
public void setUp() throws IOException, InterruptedException {
  // Keep transaction-log preallocation tiny so the test stays lightweight.
  System.setProperty("zookeeper.preAllocSize", "100");
  FileTxnLog.setPreallocSize(100 * 1024);

  if (!BASETEST.exists()) {
    BASETEST.mkdirs();
  }
  File dataDir = createTmpDir(BASETEST);
  zks = new ZooKeeperServer(dataDir, dataDir, 3000);

  final int port = Integer.parseInt(hostPort.split(":")[1]);
  if (factory == null) {
    factory = new NIOServerCnxnFactory();
    factory.configure(new InetSocketAddress(port), maxCnxns);
  }
  factory.startup(zks);
  Assert.assertTrue("waiting for server up",
      waitForServerUp("127.0.0.1:" + port, CONNECTION_TIMEOUT));
}

Class: org.apache.hadoop.yarn.logaggregation.TestAggregatedLogFormat

TestCleaner TestInitializer HybridVerifier 
/**
 * Removes the shared test work directory both before and after each test so
 * runs never see each other's aggregated log files.
 */
@Before
@After
public void cleanupTestDir() throws Exception {
  Path dirToClean = new Path(testWorkDir.getAbsolutePath());
  LOG.info("Cleaning test directory [" + dirToClean + "]");
  fs.delete(dirToClean, true);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier 
/**
 * Log aggregation must refuse to read container log files owned by a
 * different user: the aggregated file should contain an owner-mismatch
 * message for the file checked under the wrong user (stderr here) and the
 * real content for the file checked under the correct user (stdout).
 */
@Test(timeout = 10000)
public void testContainerLogsFileAccess() throws IOException {
  // The ownership check only applies with secure auth and native IO.
  Assume.assumeTrue(NativeIO.isAvailable());
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
  Path remoteAppLogFile = new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  String data = "Log File content for container : ";
  ApplicationId applicationId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId testContainerId1 = ContainerId.newInstance(applicationAttemptId, 1);
  Path appDir = new Path(srcFileRoot,
      testContainerId1.getApplicationAttemptId().getApplicationId().toString());
  Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
  String stdout = "stdout";
  String stderr = "stderr";
  writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString() + stdout);
  writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString() + stderr);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId1);
  String randomUser = "randomUser";
  LogValue logValue = spy(new LogValue(
      Collections.singletonList(srcFileRoot.toString()), testContainerId1, randomUser));
  // First ownership check sees the wrong user, the second sees the real one.
  when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
  logWriter.append(logKey, logValue);
  logWriter.close();

  // Read the aggregated file back into one string.
  BufferedReader in = new BufferedReader(
      new FileReader(new File(remoteAppLogFile.toUri().getRawPath())));
  String line;
  StringBuffer sb = new StringBuffer("");
  try {
    while ((line = in.readLine()) != null) {
      LOG.info(line);
      sb.append(line);
    }
  } finally {
    // BUG FIX: the reader used to be leaked (never closed).
    in.close();
  }
  line = sb.toString();

  String expectedOwner = ugi.getShortUserName();
  if (Path.WINDOWS) {
    // On Windows, files created by an Administrators-group member are owned
    // by the Administrators group rather than the individual user.
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  // FIX: renamed stdoutFile1 -> stderrFile; the first path checked is the
  // stderr file (it was read while getUser() still returned randomUser).
  String stderrFile = StringUtils.join(File.separator,
      Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles",
          testContainerId1.getApplicationAttemptId().getApplicationId().toString(),
          testContainerId1.toString(), stderr }));
  String message1 = "Owner '" + expectedOwner + "' for path " + stderrFile
      + " did not match expected owner '" + randomUser + "'";
  String stdoutFile = StringUtils.join(File.separator,
      Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles",
          testContainerId1.getApplicationAttemptId().getApplicationId().toString(),
          testContainerId1.toString(), stdout }));
  String message2 = "Owner '" + expectedOwner + "' for path " + stdoutFile
      + " did not match expected owner '" + ugi.getShortUserName() + "'";
  Assert.assertTrue(line.contains(message1));
  Assert.assertFalse(line.contains(message2));
  Assert.assertFalse(line.contains(data + testContainerId1.toString() + stderr));
  Assert.assertTrue(line.contains(data + testContainerId1.toString() + stdout));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes one container's 80000-char stdout through LogWriter, then reads it
 * back via LogReader and checks file permissions (0640), the LogType /
 * LogLength / Log Contents headers, the content, and the total length.
 */
@Test
public void testReadAcontainerLogs1() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile = new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  Path t = new Path(srcFileRoot,
      testContainerId.getApplicationAttemptId().getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());
  int numChars = 80000;
  writeSrcFile(srcFilePath, "stdout", numChars);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue = new LogValue(
      Collections.singletonList(srcFileRoot.toString()), testContainerId,
      ugi.getShortUserName());
  logWriter.append(logKey, logValue);
  logWriter.close();

  // Aggregated log files must be readable by owner+group only (0640).
  FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",
      FsPermission.createImmutable((short) 0640), fsStatus.getPermission());

  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  try {
    LogReader.readAcontainerLogs(dis, writer);
  } finally {
    // BUG FIX: the input stream and reader used to leak, especially when an
    // assertion below failed before the end of the test.
    dis.close();
    logReader.close();
  }
  String s = writer.toString();
  int expectedLength = "\n\nLogType:stdout".length()
      + ("\nLogLength:" + numChars).length()
      + "\nLog Contents:\n".length() + numChars;
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < numChars; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  Assert.assertEquals(expectedLength, s.length());
}

Class: org.apache.hadoop.yarn.server.TestMiniYARNClusterForHA

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Starts a two-RM MiniYARNCluster with auto-failover disabled, then
 * manually transitions RM0 to active before each test.
 */
@Before
public void setup() throws IOException, InterruptedException {
  Configuration clusterConf = new YarnConfiguration();
  clusterConf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  clusterConf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");

  cluster = new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(), 2, 1, 1, 1);
  cluster.init(clusterConf);
  cluster.start();

  // Explicit user-requested transition since automatic failover is off.
  cluster.getResourceManager(0).getRMContext().getRMAdminService()
      .transitionToActive(new HAServiceProtocol.StateChangeRequestInfo(
          HAServiceProtocol.RequestSource.REQUEST_BY_USER));
  assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
}

Class: org.apache.hadoop.yarn.server.api.protocolrecords.TestRegisterNodeManagerResponse

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Builds a RegisterNodeManagerResponse carrying a container-token master
 * key (54321) and an NM-token master key (12345), checks the in-memory
 * object, then serializes/deserializes it (serDe) and checks the copy
 * after each master-key assertion pass.
 */
@Test public void testRoundTrip() throws Exception {
  RegisterNodeManagerResponse resp=recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
  byte b[]={0,1,2,3,4,5};
  MasterKey containerTokenMK=recordFactory.newRecordInstance(MasterKey.class);
  containerTokenMK.setKeyId(54321);
  containerTokenMK.setBytes(ByteBuffer.wrap(b));
  resp.setContainerTokenMasterKey(containerTokenMK);
  MasterKey nmTokenMK=recordFactory.newRecordInstance(MasterKey.class);
  nmTokenMK.setKeyId(12345);
  nmTokenMK.setBytes(ByteBuffer.wrap(b));
  resp.setNMTokenMasterKey(nmTokenMK);
  resp.setNodeAction(NodeAction.NORMAL);
  // Sanity-check the original before any serialization.
  assertEquals(NodeAction.NORMAL,resp.getNodeAction());
  assertNotNull(resp.getContainerTokenMasterKey());
  assertEquals(54321,resp.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(b,resp.getContainerTokenMasterKey().getBytes().array());
  // Round-trip and verify the container-token master key survives.
  RegisterNodeManagerResponse respCopy=serDe(resp);
  assertEquals(NodeAction.NORMAL,respCopy.getNodeAction());
  assertNotNull(respCopy.getContainerTokenMasterKey());
  assertEquals(54321,respCopy.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(b,respCopy.getContainerTokenMasterKey().getBytes().array());
  // Same checks for the NM-token master key on the original ...
  assertNotNull(resp.getNMTokenMasterKey());
  assertEquals(12345,resp.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(b,resp.getNMTokenMasterKey().getBytes().array());
  // ... and on a fresh round-tripped copy.
  respCopy=serDe(resp);
  assertEquals(NodeAction.NORMAL,respCopy.getNodeAction());
  assertNotNull(respCopy.getNMTokenMasterKey());
  assertEquals(12345,respCopy.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(b,respCopy.getNMTokenMasterKey().getBytes().array());
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryClientService

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Two finished applications written to the history store must both come
 * back from the client service's getApplications RPC, in id order.
 */
@Test
public void testApplications() throws IOException, YarnException {
  // FIX: dropped the redundant "appId = null" initialization.
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  ApplicationId appId1 = ApplicationId.newInstance(0, 2);
  writeApplicationStartData(appId1);
  writeApplicationFinishData(appId1);
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  GetApplicationsResponse response =
      historyServer.getClientService().getClientHandler().getApplications(request);
  // FIX: restored the element type (raw List cannot compile against the
  // getApplicationId() calls below).
  List<ApplicationReport> appReport = response.getApplicationList();
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId, appReport.get(0).getApplicationId());
  Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Two finished containers of one attempt must both come back from the
 * client service's getContainers RPC. Note the indices below: the store
 * returns the later container first (containerId1 at index 0).
 */
@Test
public void testContainers() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
  ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
  writeContainerStartData(containerId);
  writeContainerFinishData(containerId);
  writeContainerStartData(containerId1);
  writeContainerFinishData(containerId1);
  writeApplicationFinishData(appId);
  GetContainersRequest request = GetContainersRequest.newInstance(appAttemptId);
  GetContainersResponse response =
      historyServer.getClientService().getClientHandler().getContainers(request);
  // FIX: restored the element type (raw List cannot compile against the
  // getContainerId() calls below).
  List<ContainerReport> containers = response.getContainerList();
  Assert.assertNotNull(containers);
  Assert.assertEquals(containerId, containers.get(1).getContainerId());
  Assert.assertEquals(containerId1, containers.get(0).getContainerId());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * An attempt written to the history store must be retrievable through the
 * client service's getApplicationAttemptReport RPC.
 */
@Test
public void testApplicationAttemptReport() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  writeApplicationAttemptStartData(attemptId);
  writeApplicationAttemptFinishData(attemptId);

  GetApplicationAttemptReportRequest reportRequest =
      GetApplicationAttemptReportRequest.newInstance(attemptId);
  GetApplicationAttemptReportResponse reportResponse =
      historyServer.getClientService().getClientHandler()
          .getApplicationAttemptReport(reportRequest);
  ApplicationAttemptReport attemptReport =
      reportResponse.getApplicationAttemptReport();
  Assert.assertNotNull(attemptReport);
  Assert.assertEquals("appattempt_0_0001_000001",
      attemptReport.getApplicationAttemptId().toString());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Two finished attempts of one application must both come back from the
 * client service's getApplicationAttempts RPC, in attempt-id order.
 */
@Test
public void testApplicationAttempts() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ApplicationAttemptId appAttemptId1 = ApplicationAttemptId.newInstance(appId, 2);
  writeApplicationAttemptStartData(appAttemptId);
  writeApplicationAttemptFinishData(appAttemptId);
  writeApplicationAttemptStartData(appAttemptId1);
  writeApplicationAttemptFinishData(appAttemptId1);
  GetApplicationAttemptsRequest request =
      GetApplicationAttemptsRequest.newInstance(appId);
  GetApplicationAttemptsResponse response =
      historyServer.getClientService().getClientHandler()
          .getApplicationAttempts(request);
  // FIX: restored the element type (raw List cannot compile against the
  // getApplicationAttemptId() calls below).
  List<ApplicationAttemptReport> attemptReports =
      response.getApplicationAttemptList();
  Assert.assertNotNull(attemptReports);
  Assert.assertEquals(appAttemptId, attemptReports.get(0).getApplicationAttemptId());
  Assert.assertEquals(appAttemptId1, attemptReports.get(1).getApplicationAttemptId());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A finished application written to the history store must be retrievable
 * through the client service's getApplicationReport RPC with the id, type,
 * and queue seeded by the write helpers.
 */
@Test
public void testApplicationReport() throws IOException, YarnException {
  // FIX: dropped the redundant "appId = null" initialization.
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  GetApplicationReportRequest request =
      GetApplicationReportRequest.newInstance(appId);
  GetApplicationReportResponse response =
      historyServer.getClientService().getClientHandler()
          .getApplicationReport(request);
  ApplicationReport appReport = response.getApplicationReport();
  Assert.assertNotNull(appReport);
  Assert.assertEquals("application_0_0001", appReport.getApplicationId().toString());
  Assert.assertEquals("test type", appReport.getApplicationType().toString());
  Assert.assertEquals("test queue", appReport.getQueue().toString());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A finished container written to the history store must be retrievable
 * through the client service's getContainerReport RPC, including its
 * aggregated-log URL.
 */
@Test
public void testContainerReport() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  writeContainerStartData(containerId);
  writeContainerFinishData(containerId);
  writeApplicationFinishData(appId);

  GetContainerReportRequest reportRequest =
      GetContainerReportRequest.newInstance(containerId);
  GetContainerReportResponse reportResponse =
      historyServer.getClientService().getClientHandler()
          .getContainerReport(reportRequest);
  ContainerReport container = reportResponse.getContainerReport();
  Assert.assertNotNull(container);
  Assert.assertEquals(containerId, container.getContainerId());
  Assert.assertEquals(expectedLogUrl, container.getLogUrl());
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryManagerImpl

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * ApplicationHistoryManagerImpl.getApplication must merge application and
 * latest-attempt data: id, current attempt, host (seeded from the attempt
 * id by the write helpers), type, and queue.
 */
@Test
public void testApplicationReport() throws IOException, YarnException {
  // FIX: dropped the redundant "appId = null" initialization.
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  writeApplicationAttemptStartData(appAttemptId);
  writeApplicationAttemptFinishData(appAttemptId);
  ApplicationReport appReport = applicationHistoryManagerImpl.getApplication(appId);
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId, appReport.getApplicationId());
  Assert.assertEquals(appAttemptId, appReport.getCurrentApplicationAttemptId());
  Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
  Assert.assertEquals("test type", appReport.getApplicationType().toString());
  Assert.assertEquals("test queue", appReport.getQueue().toString());
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryServer

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * ApplicationHistoryServer lifecycle: INITED with four child services after
 * init(), STARTED after start() (client service included), STOPPED after
 * stop().
 */
@Test(timeout = 50000)
public void testStartStopServer() throws Exception {
  historyServer = new ApplicationHistoryServer();
  Configuration config = new YarnConfiguration();
  historyServer.init(config);
  assertEquals(STATE.INITED, historyServer.getServiceState());
  assertEquals(4, historyServer.getServices().size());
  ApplicationHistoryClientService clientService = historyServer.getClientService();
  assertNotNull(historyServer.getClientService());
  assertEquals(STATE.INITED, clientService.getServiceState());

  historyServer.start();
  assertEquals(STATE.STARTED, historyServer.getServiceState());
  assertEquals(STATE.STARTED, clientService.getServiceState());

  historyServer.stop();
  assertEquals(STATE.STOPPED, historyServer.getServiceState());
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Launches the history server through its main-style entry point with
 * System.exit disabled; launch is expected to return normally.
 * NOTE(review): the catch block asserts exit status 0 and then fail()s,
 * so ANY ExitException -- even a clean exit(0) -- fails the test; confirm
 * this matches the intended ExitUtil contract here.
 */
@Test(timeout=60000) public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  try {
    historyServer=ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch ( ExitUtil.ExitException e) {
    assertEquals(0,e.status);
    ExitUtil.resetFirstExitException();
    fail();
  }
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestFileSystemApplicationHistoryStore

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Once an application's finish record is written, every further attempt or
 * container write must be rejected with an "is not opened" IOException.
 */
@Test
public void testWriteAfterApplicationFinish() throws IOException {
  LOG.info("Starting testWriteAfterApplicationFinish");
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);

  // Attempt-level writes after the app is closed must fail.
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  try {
    writeApplicationAttemptStartData(attemptId);
    Assert.fail();
  } catch (IOException ex) {
    Assert.assertTrue(ex.getMessage().contains("is not opened"));
  }
  try {
    writeApplicationAttemptFinishData(attemptId);
    Assert.fail();
  } catch (IOException ex) {
    Assert.assertTrue(ex.getMessage().contains("is not opened"));
  }

  // Container-level writes after the app is closed must fail too.
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  try {
    writeContainerStartData(containerId);
    Assert.fail();
  } catch (IOException ex) {
    Assert.assertTrue(ex.getMessage().contains("is not opened"));
  }
  try {
    writeContainerFinishData(containerId);
    Assert.fail();
  } catch (IOException ex) {
    Assert.assertTrue(ex.getMessage().contains("is not opened"));
  }
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestMemoryApplicationHistoryStore

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips attempt history through the in-memory store and checks the
 * write-ordering rules: finish-before-start is rejected, five attempts
 * read back with host/diagnostics seeded from their ids, and re-writing an
 * attempt after the application finished is rejected as already stored.
 */
@Test public void testReadWriteApplicationAttemptHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  // Finish data without prior start data must be rejected.
  try {
    writeApplicationAttemptFinishData(appAttemptId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  // Write start+finish records for five attempts of one application.
  int numAppAttempts=5;
  writeApplicationStartData(appId);
  for (int i=1; i <= numAppAttempts; ++i) {
    appAttemptId=ApplicationAttemptId.newInstance(appId,i);
    writeApplicationAttemptStartData(appAttemptId);
    writeApplicationAttemptFinishData(appAttemptId);
  }
  Assert.assertEquals(numAppAttempts,store.getApplicationAttempts(appId).size());
  // Each attempt reads back with host/diagnostics equal to its id string
  // (the convention used by this test class's write helpers).
  for (int i=1; i <= numAppAttempts; ++i) {
    appAttemptId=ApplicationAttemptId.newInstance(appId,i);
    ApplicationAttemptHistoryData data=store.getApplicationAttempt(appAttemptId);
    Assert.assertNotNull(data);
    Assert.assertEquals(appAttemptId.toString(),data.getHost());
    Assert.assertEquals(appAttemptId.toString(),data.getDiagnosticsInfo());
  }
  writeApplicationFinishData(appId);
  // Re-writing attempt data that is already stored must be rejected.
  appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  try {
    writeApplicationAttemptStartData(appAttemptId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeApplicationAttemptFinishData(appAttemptId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips application history through the in-memory store: finishing an
 * unknown app is rejected, five apps read back with name/diagnostics seeded
 * from their ids, and re-writing an existing app is rejected.
 */
@Test public void testReadWriteApplicationHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  // Finish data without prior start data must be rejected.
  try {
    writeApplicationFinishData(appId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  // Write start+finish records for five distinct applications.
  int numApps=5;
  for (int i=1; i <= numApps; ++i) {
    appId=ApplicationId.newInstance(0,i);
    writeApplicationStartData(appId);
    writeApplicationFinishData(appId);
  }
  Assert.assertEquals(numApps,store.getAllApplications().size());
  // Each app reads back with name/diagnostics equal to its id string
  // (the convention used by this test class's write helpers).
  for (int i=1; i <= numApps; ++i) {
    appId=ApplicationId.newInstance(0,i);
    ApplicationHistoryData data=store.getApplication(appId);
    Assert.assertNotNull(data);
    Assert.assertEquals(appId.toString(),data.getApplicationName());
    Assert.assertEquals(appId.toString(),data.getDiagnosticsInfo());
  }
  // Re-writing an already-stored application must be rejected.
  appId=ApplicationId.newInstance(0,1);
  try {
    writeApplicationStartData(appId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeApplicationFinishData(appId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips container history through the in-memory store: finishing an
 * unknown container is rejected, five containers read back with priority
 * seeded from the container id, container 1 is reported as the AM
 * container, and re-writing after the attempt finished is rejected.
 */
@Test public void testReadWriteContainerHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
  // Finish data without prior start data must be rejected.
  try {
    writeContainerFinishData(containerId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  // Write start+finish records for five containers of one attempt.
  writeApplicationAttemptStartData(appAttemptId);
  int numContainers=5;
  for (int i=1; i <= numContainers; ++i) {
    containerId=ContainerId.newInstance(appAttemptId,i);
    writeContainerStartData(containerId);
    writeContainerFinishData(containerId);
  }
  Assert.assertEquals(numContainers,store.getContainers(appAttemptId).size());
  // Each container reads back with priority = its container id and
  // diagnostics = its id string (convention of the write helpers).
  for (int i=1; i <= numContainers; ++i) {
    containerId=ContainerId.newInstance(appAttemptId,i);
    ContainerHistoryData data=store.getContainer(containerId);
    Assert.assertNotNull(data);
    Assert.assertEquals(Priority.newInstance(containerId.getId()),data.getPriority());
    Assert.assertEquals(containerId.toString(),data.getDiagnosticsInfo());
  }
  // The first container of the attempt is reported as the AM container.
  ContainerHistoryData masterContainer=store.getAMContainer(appAttemptId);
  Assert.assertNotNull(masterContainer);
  Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),masterContainer.getContainerId());
  writeApplicationAttemptFinishData(appAttemptId);
  // Re-writing container data that is already stored must be rejected.
  containerId=ContainerId.newInstance(appAttemptId,1);
  try {
    writeContainerStartData(containerId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeContainerFinishData(containerId);
    Assert.fail();
  } catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}

Class: org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebServices

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A GET on the service root (outside the ws/v1 tree) must come back 404
 * with an empty error body.
 */
@Test
public void testInvalidUri2() throws JSONException, Exception {
  WebResource webResource = resource();
  String responseStr = "";
  try {
    responseStr = webResource.accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", responseStr);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requesting the applicationhistory root with an unsupported Accept type
 * (text/plain) must come back 500 with an empty error body.
 */
@Test
public void testInvalidAccept() throws JSONException, Exception {
  WebResource webResource = resource();
  String responseStr = "";
  try {
    responseStr = webResource.path("ws").path("v1").path("applicationhistory")
        .accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR, response.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", responseStr);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A GET on a bogus sub-path of the application-history service must fail
 * with 404 NOT_FOUND and leave the captured response body empty.
 */
@Test
public void testInvalidUri() throws JSONException, Exception {
  WebResource root = resource();
  String body = "";
  try {
    body = root.path("ws").path("v1").path("applicationhistory").path("bogus")
        .accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse rsp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, rsp.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
  }
}

Class: org.apache.hadoop.yarn.server.nodemanager.TestDefaultContainerExecutor

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * Launches a container through a spied DefaultContainerExecutor pointed at a
 * script ("file:///bin/echo") run from a work dir that triggers a launch
 * failure, and verifies:
 * - the executor's logOutput diagnostics contain "No such file or directory"
 *   (checked inside a Mockito doAnswer stub);
 * - any ContainerDiagnosticsUpdateEvent handed to the mocked Container carries
 *   the same "No such file or directory" text;
 * - launchContainer returns a non-zero exit code (assertNotSame(0, ret)).
 * Local/log dirs are created under BASE_TMP_PATH with umask 077 and cleaned up
 * via deleteAsUser in the finally block.
 * NOTE(review): assertions inside doAnswer stubs only fire if the stubbed
 * method is actually invoked — a silent no-call would not fail those checks.
 */
@Test public void testContainerLaunchError() throws IOException, InterruptedException { Path localDir=new Path(BASE_TMP_PATH,"localDir"); List localDirs=new ArrayList(); localDirs.add(localDir.toString()); List logDirs=new ArrayList(); Path logDir=new Path(BASE_TMP_PATH,"logDir"); logDirs.add(logDir.toString()); Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir.toString()); conf.set(YarnConfiguration.NM_LOG_DIRS,logDir.toString()); FileContext lfs=FileContext.getLocalFSFileContext(conf); DefaultContainerExecutor mockExec=spy(new DefaultContainerExecutor(lfs)); mockExec.setConf(conf); doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { String diagnostics=(String)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("No such file or directory")); return null; } } ).when(mockExec).logOutput(any(String.class)); String appSubmitter="nobody"; String appId="APP_ID"; String containerId="CONTAINER_ID"; Container container=mock(Container.class); ContainerId cId=mock(ContainerId.class); ContainerLaunchContext context=mock(ContainerLaunchContext.class); HashMap env=new HashMap(); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(context); try { doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("No such file or directory")); return null; } } ).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class)); when(cId.toString()).thenReturn(containerId); 
when(cId.getApplicationAttemptId()).thenReturn(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),0)); when(context.getEnvironment()).thenReturn(env); mockExec.createUserLocalDirs(localDirs,appSubmitter); mockExec.createUserCacheDirs(localDirs,appSubmitter); mockExec.createAppDirs(localDirs,appSubmitter,appId); mockExec.createAppLogDirs(appId,logDirs); Path scriptPath=new Path("file:///bin/echo"); Path tokensPath=new Path("file:///dev/null"); Path workDir=localDir; Path pidFile=new Path(workDir,"pid.txt"); mockExec.init(); mockExec.activateContainer(cId,pidFile); int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,localDirs,localDirs); Assert.assertNotSame(0,ret); } finally { mockExec.deleteAsUser(appSubmitter,localDir); mockExec.deleteAsUser(appSubmitter,logDir); } }

Class: org.apache.hadoop.yarn.server.nodemanager.TestDirectoryCollection

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DirectoryCollection.createNonExistentDirs must create missing local dirs
 * (dirA and nested dirB) with the default permission derived from
 * FsPermission.getDefault() + DEFAULT_UMASK, while leaving a pre-existing
 * directory (dirC, pre-created with mode 0710) untouched. Uses the local
 * FileContext with umask 077; testDir is a test-class fixture.
 */
@Test public void testCreateDirectories() throws IOException { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); FileContext localFs=FileContext.getLocalFSFileContext(conf); String dirA=new File(testDir,"dirA").getPath(); String dirB=new File(dirA,"dirB").getPath(); String dirC=new File(testDir,"dirC").getPath(); Path pathC=new Path(dirC); FsPermission permDirC=new FsPermission((short)0710); localFs.mkdir(pathC,null,true); localFs.setPermission(pathC,permDirC); String[] dirs={dirA,dirB,dirC}; DirectoryCollection dc=new DirectoryCollection(dirs,conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE)); FsPermission defaultPerm=FsPermission.getDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK)); boolean createResult=dc.createNonExistentDirs(localFs,defaultPerm); Assert.assertTrue(createResult); FileStatus status=localFs.getFileStatus(new Path(dirA)); Assert.assertEquals("local dir parent not created with proper permissions",defaultPerm,status.getPermission()); status=localFs.getFileStatus(new Path(dirB)); Assert.assertEquals("local dir not created with proper permissions",defaultPerm,status.getPermission()); status=localFs.getFileStatus(pathC); Assert.assertEquals("existing local directory permissions modified",permDirC,status.getPermission()); }

Class: org.apache.hadoop.yarn.server.nodemanager.TestLinuxContainerExecutor

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Runs "sleep 100" in a container on a daemon thread, polls (up to 10 x
 * 200 ms) until the executor reports a pid for it, sends SIGTERM via
 * signalContainer, sleeps 100 ms and asserts the launcher thread has exited.
 * Skipped entirely when shouldRun() is false (environment not suitable for
 * the LinuxContainerExecutor). Fixtures: exec, appSubmitter, LOG,
 * getNextContainerId(), runAndBlock().
 * NOTE(review): the fixed 100 ms post-kill sleep makes this timing-sensitive.
 */
@Test public void testContainerKill() throws Exception { if (!shouldRun()) { return; } final ContainerId sleepId=getNextContainerId(); Thread t=new Thread(){ public void run(){ try { runAndBlock(sleepId,"sleep","100"); } catch ( IOException e) { LOG.warn("Caught exception while running sleep",e); } } } ; t.setDaemon(true); t.start(); assertTrue(t.isAlive()); String pid=null; int count=10; while ((pid=exec.getProcessId(sleepId)) == null && count > 0) { LOG.info("Sleeping for 200 ms before checking for pid "); Thread.sleep(200); count--; } assertNotNull(pid); LOG.info("Going to killing the process."); exec.signalContainer(appSubmitter,pid,Signal.TERM); LOG.info("sleeping for 100ms to let the sleep be killed"); Thread.sleep(100); assertFalse(t.isAlive()); }

Class: org.apache.hadoop.yarn.server.nodemanager.TestLinuxContainerExecutorWithMocks

TestInitializer AssumptionSetter HybridVerifier 
/**
 * Test fixture setup: skipped on Windows; ensures the mock container-executor
 * script is executable, then builds a LinuxContainerExecutor and a
 * LocalDirsHandlerService configured to use that script.
 */
@Before
public void setup() {
  assumeTrue(!Path.WINDOWS);
  File mockScript = new File("./src/test/resources/mock-container-executor");
  if (!FileUtil.canExecute(mockScript)) {
    FileUtil.setExecutable(mockScript, true);
  }
  String scriptPath = mockScript.getAbsolutePath();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, scriptPath);
  mockExec = new LinuxContainerExecutor();
  dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  mockExec.setConf(conf);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that startLocalizer invokes the mock container-executor with the
 * expected 17-element argument list: nonsecure local user, user, command code,
 * app id, token path, "-classpath", the ContainerLocalizer main class, then
 * user/app/locId and the NM address host/port.
 *
 * Fix: the original passed arguments to Assert.assertEquals as
 * (actual, expected); JUnit's contract is (expected, actual), so failure
 * messages were inverted. All assertions below use the correct order.
 */
@Test(timeout=5000) public void testStartLocalizer() throws IOException {
  InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 8040);
  Path nmPrivateCTokensPath = new Path("file:///bin/nmPrivateCTokensPath");
  try {
    mockExec.startLocalizer(nmPrivateCTokensPath, address, "test", "application_0",
        "12345", dirsHandler.getLocalDirs(), dirsHandler.getLogDirs());
    List result = readMockParams();
    Assert.assertEquals(17, result.size());
    Assert.assertEquals(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, result.get(0));
    Assert.assertEquals("test", result.get(1));
    Assert.assertEquals("0", result.get(2));
    Assert.assertEquals("application_0", result.get(3));
    Assert.assertEquals("/bin/nmPrivateCTokensPath", result.get(4));
    Assert.assertEquals("-classpath", result.get(8));
    Assert.assertEquals("org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer", result.get(11));
    Assert.assertEquals("test", result.get(12));
    Assert.assertEquals("application_0", result.get(13));
    Assert.assertEquals("12345", result.get(14));
    Assert.assertEquals("localhost", result.get(15));
    Assert.assertEquals("8040", result.get(16));
  } catch (InterruptedException e) {
    LOG.error("Error:" + e.getMessage(), e);
    Assert.fail();
  }
}

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Launches a container through a spied LinuxContainerExecutor that points at
 * the erroring mock script "mock-container-executer-with-error" and verifies:
 * - diagnostics passed to logOutput and to the Container's
 *   ContainerDiagnosticsUpdateEvent contain "badcommand" (checked inside
 *   Mockito doAnswer stubs);
 * - launchContainer returns a non-zero exit code (assertNotSame(0, ret));
 * - the exact command line handed to the mock executor matches the expected
 *   argument list (user, LAUNCH_CONTAINER code, app/container ids, work dir,
 *   script/tokens paths, pid file, local/log dirs, "cgroups=none").
 * NOTE(review): assertions inside doAnswer stubs only fire if the stubbed
 * method is actually invoked.
 */
@Test public void testContainerLaunchError() throws IOException { File f=new File("./src/test/resources/mock-container-executer-with-error"); if (!FileUtil.canExecute(f)) { FileUtil.setExecutable(f,true); } String executorPath=f.getAbsolutePath(); Configuration conf=new Configuration(); conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath); conf.set(YarnConfiguration.NM_LOCAL_DIRS,"file:///bin/echo"); conf.set(YarnConfiguration.NM_LOG_DIRS,"file:///dev/null"); mockExec=spy(new LinuxContainerExecutor()); doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { String diagnostics=(String)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("badcommand")); return null; } } ).when(mockExec).logOutput(any(String.class)); dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); mockExec.setConf(conf); String appSubmitter="nobody"; String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); String appId="APP_ID"; String containerId="CONTAINER_ID"; Container container=mock(Container.class); ContainerId cId=mock(ContainerId.class); ContainerLaunchContext context=mock(ContainerLaunchContext.class); HashMap env=new HashMap(); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(context); doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("badcommand")); return null; } } ).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class)); when(cId.toString()).thenReturn(containerId); when(context.getEnvironment()).thenReturn(env); Path scriptPath=new Path("file:///bin/echo"); Path 
tokensPath=new Path("file:///dev/null"); Path workDir=new Path("/tmp"); Path pidFile=new Path(workDir,"pid.txt"); mockExec.activateContainer(cId,pidFile); int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs()); Assert.assertNotSame(0,ret); assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams()); }

Class: org.apache.hadoop.yarn.server.nodemanager.TestLocalDirsHandlerService

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The local dirs handler must reject a non-local (hdfs://) local-dir URI:
 * init is expected to throw YarnRuntimeException and leave the service in
 * STOPPED state.
 */
@Test
public void testValidPathsDirHandlerService() {
  Configuration conf = new YarnConfiguration();
  String fileSchemeDir = new File("file:///" + testDir, "localDir1").getPath();
  String hdfsSchemeDir = new File("hdfs:///" + testDir, "localDir2").getPath();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, fileSchemeDir + "," + hdfsSchemeDir);
  String fileSchemeLogDir = new File("file:///" + testDir, "logDir1").getPath();
  conf.set(YarnConfiguration.NM_LOG_DIRS, fileSchemeLogDir);
  LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
  try {
    dirSvc.init(conf);
    Assert.fail("Service should have thrown an exception due to wrong URI");
  } catch (YarnRuntimeException expected) {
    // expected: the hdfs:// entry is not a valid NM local dir
  }
  Assert.assertEquals("Service should not be inited", STATE.STOPPED, dirSvc.getServiceState());
}

Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeManagerReboot

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end check that an NM restart clears its local state:
 * (1) start MyNodeManager, seed the filecache with 100 files, localize a
 *     resource and start a container as the app-attempt user (NMToken added
 *     to the UGI so startContainers is authorized);
 * (2) poll (up to 20 x 500 ms) until the container reaches DONE, then assert
 *     usercache and nm_private contain entries;
 * (3) stop the NM, start a fresh MyNodeManager, poll until usercache,
 *     filecache and nm_private are all empty, and assert so;
 * (4) verify via the mocked DeletionService that the *_DEL_-suffixed
 *     directory deletions and the expected file-deletion tasks were each
 *     scheduled exactly once.
 * Fixtures: nm, nmLocalDir, localResourceDir, localFS, user, delService,
 * createFiles/createContainerId/numOfLocalDirs helpers, PathInclude and
 * FileDeletionInclude matchers.
 */
@Test(timeout=2000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException { nm=new MyNodeManager(); nm.start(); final ContainerManagementProtocol containerManager=nm.getContainerManager(); createFiles(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE,100); localResourceDir.mkdirs(); ContainerLaunchContext containerLaunchContext=Records.newRecord(ContainerLaunchContext.class); ContainerId cId=createContainerId(); URL localResourceUri=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath()))); LocalResource localResource=LocalResource.newInstance(localResourceUri,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,-1,localResourceDir.lastModified()); String destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,localResource); containerLaunchContext.setLocalResources(localResources); List commands=new ArrayList(); containerLaunchContext.setCommands(commands); NodeId nodeId=nm.getNMContext().getNodeId(); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,TestContainerManager.createContainerToken(cId,0,nodeId,destinationFile,nm.getNMContext().getContainerTokenSecretManager())); List list=new ArrayList(); list.add(scRequest); final StartContainersRequest allRequests=StartContainersRequest.newInstance(list); final UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString()); NMTokenIdentifier nmIdentifier=new NMTokenIdentifier(cId.getApplicationAttemptId(),nodeId,user,123); currentUser.addTokenIdentifier(nmIdentifier); currentUser.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws YarnException, IOException { nm.getContainerManager().startContainers(allRequests); return null; } } ); List containerIds=new ArrayList(); containerIds.add(cId); GetContainerStatusesRequest 
request=GetContainerStatusesRequest.newInstance(containerIds); Container container=nm.getNMContext().getContainers().get(request.getContainerIds().get(0)); final int MAX_TRIES=20; int numTries=0; while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) { try { Thread.sleep(500); } catch ( InterruptedException ex) { } numTries++; } Assert.assertEquals(ContainerState.DONE,container.getContainerState()); Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0); Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0); nm.stop(); nm=new MyNodeManager(); nm.start(); numTries=0; while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0) && numTries < MAX_TRIES) { try { Thread.sleep(500); } catch ( InterruptedException ex) { } numTries++; } Assert.assertTrue("After NM reboots, all local files should be deleted",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) == 0); verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"))); verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_"))); verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user,null,new String[]{destinationFile}))); 
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null,ContainerLocalizer.USERCACHE + "_DEL_",new String[]{}))); }

Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeManagerResync

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies NM resync behaviour for completed-container reporting:
 * - on the FIRST registration (registerCount == 0) the NM must report no
 *   container statuses; the test then injects a mock COMPLETED container into
 *   the NM context;
 * - on the SECOND registration (after RESYNC) the NM must report exactly that
 *   one completed container;
 * - every heartbeat must carry exactly the one completed container status,
 *   and the mock tracker always answers with NodeAction.RESYNC to force the
 *   re-registration.
 * Assertion failures inside the tracker threads are recorded in the
 * assertionFailedInThread flag (checked at the end) because JUnit cannot see
 * failures thrown on non-test threads. syncBarrier/assertionFailedInThread
 * and createNMConfig/createNMContainerStatus are test-class fixtures.
 */
@Test public void testNMSentContainerStatusOnResync() throws Exception { final ContainerStatus testCompleteContainer=TestNodeStatusUpdater.createContainerStatus(2,ContainerState.COMPLETE); final Container container=TestNodeStatusUpdater.getMockContainer(testCompleteContainer); NMContainerStatus report=createNMContainerStatus(2,ContainerState.COMPLETE); when(container.getNMContainerStatus()).thenReturn(report); NodeManager nm=new NodeManager(){ int registerCount=0; @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ return new TestNodeStatusUpdaterResync(context,dispatcher,healthChecker,metrics){ @Override protected ResourceTracker createResourceTracker(){ return new MockResourceTracker(){ @Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException { if (registerCount == 0) { try { Assert.assertEquals(0,request.getNMContainerStatuses().size()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } getNMContext().getContainers().put(testCompleteContainer.getContainerId(),container); } else { List statuses=request.getNMContainerStatuses(); try { Assert.assertEquals(1,statuses.size()); Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } } registerCount++; return super.registerNodeManager(request); } @Override public NodeHeartbeatResponse nodeHeartbeat( NodeHeartbeatRequest request){ List statuses=request.getNodeStatus().getContainersStatuses(); try { Assert.assertEquals(1,statuses.size()); Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } return 
YarnServerBuilderUtils.newNodeHeartbeatResponse(1,NodeAction.RESYNC,null,null,null,null,1000L); } } ; } } ; } } ; YarnConfiguration conf=createNMConfig(); nm.init(conf); nm.start(); try { syncBarrier.await(); } catch ( BrokenBarrierException e) { } Assert.assertFalse(assertionFailedInThread.get()); nm.stop(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When resync (triggered via a RESYNC NodeManagerEvent) throws, the NM must
 * shut itself down: after one successful registration, the test fires the
 * RESYNC event and then waits (synchronized wait/notify on the shared
 * isNMShutdownCalled flag, guarded by a while loop against spurious wakeups)
 * until TestNodeManager3 signals that shutdown was invoked.
 */
@SuppressWarnings("unchecked") @Test(timeout=10000) public void testNMshutdownWhenResyncThrowException() throws IOException, InterruptedException, YarnException { NodeManager nm=new TestNodeManager3(); YarnConfiguration conf=createNMConfig(); nm.init(conf); nm.start(); Assert.assertEquals(1,((TestNodeManager3)nm).getNMRegistrationCount()); nm.getNMDispatcher().getEventHandler().handle(new NodeManagerEvent(NodeManagerEventType.RESYNC)); synchronized (isNMShutdownCalled) { while (isNMShutdownCalled.get() == false) { try { isNMShutdownCalled.wait(); } catch ( InterruptedException e) { } } } Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get()); nm.stop(); }

Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the RM answers heartbeats with SHUTDOWN, the NM must mark itself
 * decommissioned and transition to STOPPED on its own.
 * (Method name spelling kept for compatibility with existing tooling.)
 */
@Test
public void testNodeDecommision() throws Exception {
  nm = getNodeManager(NodeAction.SHUTDOWN);
  YarnConfiguration conf = createNMConfig();
  nm.init(conf);
  Assert.assertEquals(STATE.INITED, nm.getServiceState());
  nm.start();
  // Poll (up to 200 x 500 ms) for at least one heartbeat to have been sent.
  int attempts = 0;
  while (heartBeatID < 1 && attempts++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  Assert.assertTrue(nm.getNMContext().getDecommissioned());
  // The NM is expected to stop itself; poll (up to 20 x 1 s) for STOPPED.
  attempts = 0;
  while (nm.getServiceState() != STATE.STOPPED && attempts++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Exercises the NM's RM-connection retry logic with MyNodeStatusUpdater4:
 * Phase 1 — the RM never comes up (serverRunning=true variant per the ctor's
 *   last flag; semantics of that flag live in the updater class — confirm
 *   there): nm.start() must fail, and it must have kept retrying for at least
 *   connectionWaitMs (5 s) but given up within connectionWaitMs + delta (50 s).
 * Phase 2 — the RM becomes reachable after rmStartIntervalMS (2 s): nm.start()
 *   must succeed, the updater must report isTriggered(), and the measured
 *   startup duration must fall in [rmStartIntervalMS, rmStartIntervalMS+delta).
 * Timing windows are wall-clock based, so this test is inherently
 * load-sensitive; delta=50 s is the slack.
 */
@Test(timeout=150000) public void testNMConnectionToRM() throws Exception { final long delta=50000; final long connectionWaitMs=5000; final long connectionRetryIntervalMs=1000; final long rmStartIntervalMS=2 * 1000; conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,connectionWaitMs); conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,connectionRetryIntervalMs); NodeManagerWithCustomNodeStatusUpdater nmWithUpdater; nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){ @Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,true); return nodeStatusUpdater; } } ; nm.init(conf); long waitStartTime=System.currentTimeMillis(); try { nm.start(); Assert.fail("NM should have failed to start due to RM connect failure"); } catch ( Exception e) { long t=System.currentTimeMillis(); long duration=t - waitStartTime; boolean waitTimeValid=(duration >= connectionWaitMs) && (duration < (connectionWaitMs + delta)); if (!waitTimeValid) { throw new Exception("NM should have tried re-connecting to RM during " + "period of at least " + connectionWaitMs + " ms, but "+ "stopped retrying within "+ (connectionWaitMs + delta)+ " ms: "+ e,e); } } nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){ @Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,false); return nodeStatusUpdater; } } ; nm.init(conf); NodeStatusUpdater updater=nmWithUpdater.getUpdater(); Assert.assertNotNull("Updater not yet created ",updater); waitStartTime=System.currentTimeMillis(); try { nm.start(); } catch ( Exception ex) { LOG.error("NM should have started successfully 
" + "after connecting to RM.",ex); throw ex; } long duration=System.currentTimeMillis() - waitStartTime; MyNodeStatusUpdater4 myUpdater=(MyNodeStatusUpdater4)updater; Assert.assertTrue("NM started before updater triggered",myUpdater.isTriggered()); Assert.assertTrue("NM should have connected to RM after " + "the start interval of " + rmStartIntervalMS + ": actual "+ duration+ " "+ myUpdater,(duration >= rmStartIntervalMS)); Assert.assertTrue("NM should have connected to RM less than " + (rmStartIntervalMS + delta) + " milliseconds of RM starting up: actual "+ duration+ " "+ myUpdater,(duration < (rmStartIntervalMS + delta))); }

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Verifies NM registration with the (mock) RM:
 * - the NodeStatusUpdater must be the LAST service in the NM's service list
 *   (so it starts after everything else is ready);
 * - nm.start() runs on a separate thread; startup errors are surfaced via the
 *   shared nmStartError field and failed out of the polling loop, because an
 *   exception on the helper thread would otherwise be invisible to JUnit;
 * - the test polls (up to 50 x 2 s) for STARTED, then (up to 200 x 1 s) for
 *   more than 3 heartbeats, and finally asserts exactly one NM registered
 *   in this.registeredNodes.
 */
@Test public void testNMRegistration() throws InterruptedException { nm=new NodeManager(){ @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ return new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics); } } ; YarnConfiguration conf=createNMConfig(); nm.init(conf); Object[] services=nm.getServices().toArray(); Object lastService=services[services.length - 1]; Assert.assertTrue("last service is NOT the node status updater",lastService instanceof NodeStatusUpdater); new Thread(){ public void run(){ try { nm.start(); } catch ( Throwable e) { TestNodeStatusUpdater.this.nmStartError=e; throw new YarnRuntimeException(e); } } } .start(); System.out.println(" ----- thread already started.." + nm.getServiceState()); int waitCount=0; while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) { LOG.info("Waiting for NM to start.."); if (nmStartError != null) { LOG.error("Error during startup. ",nmStartError); Assert.fail(nmStartError.getCause().getMessage()); } Thread.sleep(2000); } if (nm.getServiceState() != STATE.STARTED) { Assert.fail("NodeManager failed to start"); } waitCount=0; while (heartBeatID <= 3 && waitCount++ != 200) { Thread.sleep(1000); } Assert.assertFalse(heartBeatID <= 3); Assert.assertEquals("Number of registered NMs is wrong!!",1,this.registeredNodes.size()); nm.stop(); }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * With log aggregation enabled and a 4 s RM-NM expiry interval, the NM must
 * send keep-alive requests for a live application: after ~12 heartbeats the
 * tracker should have recorded 2 or 3 keep-alives for the app. The app is
 * then removed from the NM context, and after ~8 more heartbeats the
 * keep-alive count must be unchanged (no keep-alives for a removed app).
 * heartBeatID is a shared test-class counter advanced by the mock tracker;
 * the finally block stops the NM only if it actually reached STARTED.
 */
@Test public void testApplicationKeepAlive() throws Exception { MyNodeManager nm=new MyNodeManager(); try { YarnConfiguration conf=createNMConfig(); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true); conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,4000l); nm.init(conf); nm.start(); while (heartBeatID < 12) { Thread.sleep(1000l); } MyResourceTracker3 rt=(MyResourceTracker3)nm.getNodeStatusUpdater().getRMClient(); rt.context.getApplications().remove(rt.appId); Assert.assertEquals(1,rt.keepAliveRequests.size()); int numKeepAliveRequests=rt.keepAliveRequests.get(rt.appId).size(); LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]"); Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3); while (heartBeatID < 20) { Thread.sleep(1000l); } int numKeepAliveRequests2=rt.keepAliveRequests.get(rt.appId).size(); Assert.assertEquals(numKeepAliveRequests,numKeepAliveRequests2); } finally { if (nm.getServiceState() == STATE.STARTED) nm.stop(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A SHUTDOWN heartbeat action followed by an explicit nm.stop() must run the
 * application-cleanup hook exactly once (stop must be reentrant-safe): the
 * overridden cleanUpApplicationsOnNMShutDown increments numCleanups, the test
 * waits for the first heartbeat, stops the NM, polls (up to 20 x 1 s) for
 * STOPPED, and asserts a single cleanup.
 *
 * Fix: the final count assertion originally passed (actual, expected) to
 * Assert.assertEquals; JUnit's contract is (expected, actual), so the failure
 * message would have been inverted. Corrected below.
 */
@Test public void testStopReentrant() throws Exception {
  final AtomicInteger numCleanups = new AtomicInteger(0);
  nm = new NodeManager() {
    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
      MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
      MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
      // Force the RM's heartbeat answer to SHUTDOWN so the NM begins stopping itself.
      myResourceTracker2.heartBeatNodeAction = NodeAction.SHUTDOWN;
      myNodeStatusUpdater.resourceTracker = myResourceTracker2;
      return myNodeStatusUpdater;
    }
    @Override
    protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) {
      return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, metrics, aclsManager, dirsHandler) {
        @Override
        public void cleanUpApplicationsOnNMShutDown() {
          super.cleanUpApplicationsOnNMShutDown();
          // Count invocations so reentrancy (double cleanup) is detectable.
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  YarnConfiguration conf = createNMConfig();
  nm.init(conf);
  nm.start();
  int waitCount = 0;
  while (heartBeatID < 1 && waitCount++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  nm.stop();
  waitCount = 0;
  while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
  Assert.assertEquals(1, numCleanups.get());
}

Class: org.apache.hadoop.yarn.server.nodemanager.TestRecordFactory

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The protobuf record factory must instantiate the PB implementation class
 * (LocalizerHeartbeatResponsePBImpl) for the LocalizerHeartbeatResponse
 * interface; a YarnRuntimeException from the factory fails the test.
 *
 * Fix: corrected the typo in the failure message ("crete" -> "create").
 */
@Test public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    LocalizerHeartbeatResponse response =
        pbRecordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
    Assert.assertEquals(LocalizerHeartbeatResponsePBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
}

Class: org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.TestPBLocalizerRPC

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Starts an in-process LocalizerService on 0.0.0.0:8040, connects a
 * LocalizationProtocol RPC client, sends a heartbeat for "localizer0" and
 * asserts the response equals dieHBResponse(). The server is always stopped
 * in the finally block.
 *
 * Fix: removed a trailing {@code assertTrue(true)} — it can never fail and
 * provided no verification.
 */
@Test public void testLocalizerRPC() throws Exception {
  InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
  LocalizerService server = new LocalizerService(locAddr);
  try {
    server.start();
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    LocalizationProtocol client =
        (LocalizationProtocol) rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
    LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId("localizer0");
    LocalizerHeartbeatResponse response = client.heartbeat(status);
    assertEquals(dieHBResponse(), response);
  } finally {
    server.stop();
  }
}

Class: org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.TestPBRecordImpl

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a LocalizerHeartbeatResponse through its delimited protobuf
 * encoding: serialize the PB impl, parse it back, and check that the decoded
 * record equals the original and that both sides expose the expected
 * resource in their first resource spec.
 */
@Test(timeout=10000)
public void testLocalizerHeartbeatResponseSerDe() throws Exception {
  LocalizerHeartbeatResponse original = createLocalizerHeartbeatResponse();
  assertTrue(original instanceof LocalizerHeartbeatResponsePBImpl);
  LocalizerHeartbeatResponsePBImpl originalPb = (LocalizerHeartbeatResponsePBImpl) original;
  DataOutputBuffer out = new DataOutputBuffer();
  originalPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalizerHeartbeatResponseProto parsedProto =
      LocalizerHeartbeatResponseProto.parseDelimitedFrom(in);
  assertNotNull(parsedProto);
  LocalizerHeartbeatResponse decoded = new LocalizerHeartbeatResponsePBImpl(parsedProto);
  assertEquals(original, decoded);
  assertEquals(createResource(), original.getResourceSpecs().get(0).getResource());
  assertEquals(createResource(), decoded.getResourceSpecs().get(0).getResource());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a LocalizerStatus through its delimited protobuf encoding and
 * verifies equality of the decoded record plus the localizer id
 * ("localizer0") and the first resource status on both sides.
 */
@Test(timeout=10000)
public void testLocalizerStatusSerDe() throws Exception {
  LocalizerStatus original = createLocalizerStatus();
  assertTrue(original instanceof LocalizerStatusPBImpl);
  LocalizerStatusPBImpl originalPb = (LocalizerStatusPBImpl) original;
  DataOutputBuffer out = new DataOutputBuffer();
  originalPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalizerStatusProto parsedProto = LocalizerStatusProto.parseDelimitedFrom(in);
  assertNotNull(parsedProto);
  LocalizerStatus decoded = new LocalizerStatusPBImpl(parsedProto);
  assertEquals(original, decoded);
  assertEquals("localizer0", original.getLocalizerId());
  assertEquals("localizer0", decoded.getLocalizerId());
  assertEquals(createLocalResourceStatus(), original.getResourceStatus(0));
  assertEquals(createLocalResourceStatus(), decoded.getResourceStatus(0));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a LocalResourceStatus through its delimited protobuf encoding
 * and verifies equality of the decoded record plus the embedded resource on
 * both sides.
 */
@Test(timeout=10000)
public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus original = createLocalResourceStatus();
  assertTrue(original instanceof LocalResourceStatusPBImpl);
  LocalResourceStatusPBImpl originalPb = (LocalResourceStatusPBImpl) original;
  DataOutputBuffer out = new DataOutputBuffer();
  originalPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalResourceStatusProto parsedProto = LocalResourceStatusProto.parseDelimitedFrom(in);
  assertNotNull(parsedProto);
  LocalResourceStatus decoded = new LocalResourceStatusPBImpl(parsedProto);
  assertEquals(original, decoded);
  assertEquals(createResource(), original.getResource());
  assertEquals(createResource(), decoded.getResource());
}

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestAuxServices

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stopping one auxiliary service out-of-band must take down the whole
 * AuxServices composite: after stopping the first registered service, the
 * composite's state is STOPPED and its service list is empty.
 */
@Test
public void testAuxUnexpectedStop() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"), ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"), ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);
  aux.start();
  Service firstService = aux.getServices().iterator().next();
  firstService.stop();
  assertEquals("Auxiliary service stopped, but AuxService unaffected.", STOPPED, aux.getServiceState());
  assertTrue(aux.getServices().isEmpty());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies AuxServices event routing: an APPLICATION_INIT targeted at
 * "Asrv" and an APPLICATION_STOP targeted at "Bsrv" are delivered, with the
 * LightService test doubles recording app id 66 as stopped; then
 * CONTAINER_INIT and CONTAINER_STOP events must propagate the container id
 * and resource to every registered service. NOTE(review): the ContainerImpl
 * is constructed with null collaborators — assumed safe for this test
 * double; confirm against ContainerImpl's constructor.
 */
@Test public void testAuxEventDispatch(){ Configuration conf=new Configuration(); conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"}); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class); conf.setInt("A.expected.init",1); conf.setInt("B.expected.stop",1); final AuxServices aux=new AuxServices(); aux.init(conf); aux.start(); ApplicationId appId1=ApplicationId.newInstance(0,65); ByteBuffer buf=ByteBuffer.allocate(6); buf.putChar('A'); buf.putInt(65); buf.flip(); AuxServicesEvent event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,"user0",appId1,"Asrv",buf); aux.handle(event); ApplicationId appId2=ApplicationId.newInstance(0,66); event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,"user0",appId2,"Bsrv",null); aux.handle(event); Collection servs=aux.getServices(); for ( AuxiliaryService serv : servs) { ArrayList appIds=((LightService)serv).getAppIdsStopped(); assertEquals("app not properly stopped",1,appIds.size()); assertTrue("wrong app stopped",appIds.contains((Integer)66)); } for ( AuxiliaryService serv : servs) { assertNull(((LightService)serv).containerId); assertNull(((LightService)serv).resource); } ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId1,1); ContainerTokenIdentifier cti=new ContainerTokenIdentifier(ContainerId.newInstance(attemptId,1),"","",Resource.newInstance(1,1),0,0,0,Priority.newInstance(0),0); Container container=new ContainerImpl(null,null,null,null,null,null,cti); ContainerId containerId=container.getContainerId(); Resource resource=container.getResource(); event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container); aux.handle(event); for ( AuxiliaryService serv : servs) { assertEquals(containerId,((LightService)serv).containerId); assertEquals(resource,((LightService)serv).resource); 
((LightService)serv).containerId=null; ((LightService)serv).resource=null; } event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP,container); aux.handle(event); for ( AuxiliaryService serv : servs) { assertEquals(containerId,((LightService)serv).containerId); assertEquals(resource,((LightService)serv).resource); } }

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * AuxServices must instantiate exactly one ServiceA and one ServiceB from
 * configuration and drive both through INITED, STARTED and STOPPED.
 */
@Test
public void testAuxServices() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"), ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"), ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);

  // Encode the observed mix as a product of primes: 2 for A, 3 for B.
  // One of each yields exactly 6.
  int latch = 1;
  for (Service s : aux.getServices()) {
    assertEquals(INITED, s.getServiceState());
    if (s instanceof ServiceA) {
      latch *= 2;
    } else if (s instanceof ServiceB) {
      latch *= 3;
    } else {
      fail("Unexpected service type " + s.getClass());
    }
  }
  assertEquals("Invalid mix of services", 6, latch);

  aux.start();
  for (Service s : aux.getServices()) {
    assertEquals(STARTED, s.getServiceState());
  }

  aux.stop();
  for (Service s : aux.getServices()) {
    assertEquals(STOPPED, s.getServiceState());
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Like testAuxServices, but additionally verifies that after start the
 * composite exposes per-service metadata: "A" for Asrv and "B" for Bsrv.
 */
@Test
public void testAuxServicesMeta() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"), ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"), ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);

  // Product-of-primes check: exactly one A (2) and one B (3) -> 6.
  int latch = 1;
  for (Service s : aux.getServices()) {
    assertEquals(INITED, s.getServiceState());
    if (s instanceof ServiceA) {
      latch *= 2;
    } else if (s instanceof ServiceB) {
      latch *= 3;
    } else {
      fail("Unexpected service type " + s.getClass());
    }
  }
  assertEquals("Invalid mix of services", 6, latch);

  aux.start();
  for (Service s : aux.getServices()) {
    assertEquals(STARTED, s.getServiceState());
  }

  // Each service publishes a one-byte metadata payload.
  Map meta = aux.getMetaData();
  assertEquals(2, meta.size());
  assertEquals("A", new String(meta.get("Asrv").array()));
  assertEquals("B", new String(meta.get("Bsrv").array()));

  aux.stop();
  for (Service s : aux.getServices()) {
    assertEquals(STOPPED, s.getServiceState());
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Auxiliary service names must contain only [a-zA-Z0-9_] and must not start
 * with a digit: "Asrv1" and "Bsrv_2" are accepted by AuxServices.init,
 * while "1Asrv1" must be rejected with the documented error message.
 */
@Test public void testValidAuxServiceName(){ final AuxServices aux=new AuxServices(); Configuration conf=new Configuration(); conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv1","Bsrv_2"}); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv1"),ServiceA.class,Service.class); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv_2"),ServiceB.class,Service.class); try { aux.init(conf); } catch ( Exception ex) { Assert.fail("Should not receive the exception."); } final AuxServices aux1=new AuxServices(); conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"1Asrv1"}); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"1Asrv1"),ServiceA.class,Service.class); try { aux1.init(conf); Assert.fail("Should receive the exception."); } catch ( Exception ex) { assertTrue(ex.getMessage().contains("The ServiceName: 1Asrv1 set in " + "yarn.nodemanager.aux-services is invalid.The valid service name " + "should only contain a-zA-Z0-9_ and can not start with numbers")); } }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Launches a container running a long-sleeping script that records a
 * greeting and its pid in a start file, verifies the process is alive,
 * then stops the container and checks the process is gone and the exit
 * status is KILLED_BY_APPMASTER.
 *
 * Fixes over the previous version: the duplicated process-liveness
 * assertion is removed and the reader over the start file is closed, so the
 * test no longer leaks a file handle.
 */
@Test
public void testContainerLaunchAndStop() throws IOException, InterruptedException, YarnException {
  containerManager.start();

  // Write a platform-appropriate script that creates the start file and
  // then blocks for ~100 seconds so we can observe the live process.
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();

  // Localize the script as an APPLICATION-visibility file resource.
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map localResources = new HashMap();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext,
      createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  // Poll (up to ~20s) for the script to create its start file.
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());

  // Read greeting + pid; close the reader (previously leaked).
  String pid;
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    pid = reader.readLine().trim();
    Assert.assertEquals(null, reader.readLine());
  } finally {
    reader.close();
  }
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));

  // Stop the container and wait for it to complete.
  List containerIds = new ArrayList();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);

  GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Starts ten containers where the even-numbered ones carry tokens minted
 * with RM_INVALID_IDENTIFIER (a "previous RM"). Exactly the five
 * odd-numbered containers must start successfully; the five even-numbered
 * requests must fail with a diagnostic saying the container was allocated
 * by a previous RM.
 */
@Test public void testMultipleContainersLaunch() throws Exception { containerManager.start(); List list=new ArrayList(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); for (int i=0; i < 10; i++) { ContainerId cId=createContainerId(i); long identifier=0; if ((i & 1) == 0) identifier=ResourceManagerConstants.RM_INVALID_IDENTIFIER; else identifier=DUMMY_RM_IDENTIFIER; Token containerToken=createContainerToken(cId,identifier,context.getNodeId(),user,context.getContainerTokenSecretManager()); StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken); list.add(request); } StartContainersRequest requestList=StartContainersRequest.newInstance(list); StartContainersResponse response=containerManager.startContainers(requestList); Assert.assertEquals(5,response.getSuccessfullyStartedContainers().size()); for ( ContainerId id : response.getSuccessfullyStartedContainers()) { Assert.assertEquals(1,id.getId() & 1); } Assert.assertEquals(5,response.getFailedRequests().size()); for ( Map.Entry entry : response.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Container " + entry.getKey() + " rejected as it is allocated by a previous RM")); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Launches ten containers, alternating between user "Fail" (even ids) and
 * "Pass" (odd ids); the test container manager rejects requests from
 * "Fail". Verifies that both getContainerStatuses and stopContainers
 * succeed for the five odd containers and report per-container
 * "Reject this container" failures for the five even ones.
 */
@Test public void testMultipleContainersStopAndGetStatus() throws Exception { containerManager.start(); List startRequest=new ArrayList(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); List containerIds=new ArrayList(); for (int i=0; i < 10; i++) { ContainerId cId=createContainerId(i); String user=null; if ((i & 1) == 0) { user="Fail"; } else { user="Pass"; } Token containerToken=createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()); StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken); startRequest.add(request); containerIds.add(cId); } StartContainersRequest requestList=StartContainersRequest.newInstance(startRequest); containerManager.startContainers(requestList); GetContainerStatusesRequest statusRequest=GetContainerStatusesRequest.newInstance(containerIds); GetContainerStatusesResponse statusResponse=containerManager.getContainerStatuses(statusRequest); Assert.assertEquals(5,statusResponse.getContainerStatuses().size()); for ( ContainerStatus status : statusResponse.getContainerStatuses()) { Assert.assertEquals(1,status.getContainerId().getId() & 1); } Assert.assertEquals(5,statusResponse.getFailedRequests().size()); for ( Map.Entry entry : statusResponse.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container")); } StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds); StopContainersResponse stopResponse=containerManager.stopContainers(stopRequest); Assert.assertEquals(5,stopResponse.getSuccessfullyStoppedContainers().size()); for ( ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) { Assert.assertEquals(1,id.getId() & 1); } Assert.assertEquals(5,stopResponse.getFailedRequests().size()); for ( Map.Entry entry : 
stopResponse.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container")); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Localizes a single file resource into a container and verifies the full
 * NM directory layout (usercache, appcache, nmPrivate, per-app and
 * per-container dirs) exists and that the localized file has the expected
 * contents.
 *
 * Fix over the previous version: the reader over the localized target file
 * is now closed, so the test no longer leaks a file handle.
 */
@Test
public void testContainerSetup() throws Exception {
  containerManager.start();

  // Create the file to be localized.
  File dir = new File(tmpDir, "dir");
  dir.mkdirs();
  File file = new File(dir, "file");
  PrintWriter fileWriter = new PrintWriter(file);
  fileWriter.write("Hello World!");
  fileWriter.close();

  // Build a launch context that localizes it as "dest_file".
  ContainerId cId = createContainerId(0);
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(file.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(file.lastModified());
  String destinationFile = "dest_file";
  Map localResources = new HashMap();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);

  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext,
      createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);

  // Compute the expected on-disk layout.
  ApplicationId appId = cId.getApplicationAttemptId().getApplicationId();
  String appIDStr = ConverterUtils.toString(appId);
  String containerIDStr = ConverterUtils.toString(cId);
  File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
  File userDir = new File(userCacheDir, user);
  File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
  File appDir = new File(appCache, appIDStr);
  File containerDir = new File(appDir, containerIDStr);
  File targetFile = new File(containerDir, destinationFile);
  File sysDir = new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR);
  File appSysDir = new File(sysDir, appIDStr);
  File containerSysDir = new File(appSysDir, containerIDStr);

  for (File f : new File[]{localDir, sysDir, userCacheDir, appDir, appSysDir, containerDir, containerSysDir}) {
    Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists());
    Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!", f.isDirectory());
  }
  Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!", targetFile.exists());

  // Verify contents; close the reader (previously leaked).
  BufferedReader reader = new BufferedReader(new FileReader(targetFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    Assert.assertEquals(null, reader.readLine());
  } finally {
    reader.close();
  }
}

APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After start, the NM context's node id must use the canonical host name
 * when it differs from the raw address; and asking for the status of a
 * container that was never started must surface an exception via the
 * failed-requests map.
 */
@Test
public void testContainerManagerInitialization() throws IOException {
  containerManager.start();

  InetAddress localAddr = InetAddress.getLocalHost();
  String canonicalHost = localAddr.getCanonicalHostName();
  if (!localAddr.getHostAddress().equals(canonicalHost)) {
    // Node id should be registered under the fully-qualified name.
    Assert.assertEquals(canonicalHost, context.getNodeId().getHost());
  }

  boolean sawException = false;
  try {
    List containerIds = new ArrayList();
    ContainerId unknownId = createContainerId(0);
    containerIds.add(unknownId);
    GetContainerStatusesRequest request = GetContainerStatusesRequest.newInstance(containerIds);
    GetContainerStatusesResponse response = containerManager.getContainerStatuses(request);
    if (response.getFailedRequests().containsKey(unknownId)) {
      // Re-throw the serialized failure so the catch below observes it.
      throw response.getFailedRequests().get(unknownId).deSerialize();
    }
  } catch (Throwable t) {
    sawException = true;
  }
  Assert.assertTrue(sawException);
}

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManagerRecovery

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises NM work-preserving recovery of application state. Starts a
 * container with application ACLs against an NMMemoryStateStoreService,
 * then repeatedly stops and recreates the ContainerManager over the same
 * state store, verifying after each restart that the application is
 * restored with the expected state (INITING, then
 * APPLICATION_RESOURCES_CLEANINGUP after the RM finishes the app) and that
 * the view/modify ACLs still grant viewUser/modUser and deny enemyUser.
 * After APPLICATION_RESOURCES_CLEANEDUP + LOG_HANDLING_FINISHED the app is
 * FINISHED and the final restart must recover no applications at all.
 */
@Test public void testApplicationRecovery() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); conf.set(YarnConfiguration.NM_ADDRESS,"localhost:1234"); conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true); conf.set(YarnConfiguration.YARN_ADMIN_ACL,"yarn_admin_user"); NMStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); Context context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); ContainerManagerImpl cm=createContainerManager(context); cm.init(conf); cm.start(); MasterKey masterKey=new MasterKeyPBImpl(); masterKey.setKeyId(123); masterKey.setBytes(ByteBuffer.wrap(new byte[]{new Integer(123).byteValue()})); context.getContainerTokenSecretManager().setMasterKey(masterKey); context.getNMTokenSecretManager().setMasterKey(masterKey); String appUser="app_user1"; String modUser="modify_user1"; String viewUser="view_user1"; String enemyUser="enemy_user"; ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cid=ContainerId.newInstance(attemptId,1); Map localResources=Collections.emptyMap(); Map containerEnv=Collections.emptyMap(); List containerCmds=Collections.emptyList(); Map serviceData=Collections.emptyMap(); Credentials containerCreds=new Credentials(); DataOutputBuffer dob=new DataOutputBuffer(); containerCreds.writeTokenStorageToStream(dob); ByteBuffer containerTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength()); Map acls=new HashMap(); acls.put(ApplicationAccessType.MODIFY_APP,modUser); acls.put(ApplicationAccessType.VIEW_APP,viewUser); ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,containerEnv,containerCmds,serviceData,containerTokens,acls); StartContainersResponse startResponse=startContainer(context,cm,cid,clc); 
assertTrue(startResponse.getFailedRequests().isEmpty()); assertEquals(1,context.getApplications().size()); Application app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1,context.getApplications().size()); app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); List finishedApps=new ArrayList(); finishedApps.add(appId); cm.handle(new 
CMgrCompletedAppsEvent(finishedApps,CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1,context.getApplications().size()); app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); assertEquals(app.getApplicationState(),ApplicationState.FINISHED); app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertTrue(context.getApplications().isEmpty()); cm.stop(); }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.application.TestApplication

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An application whose containers have all finished moves from RUNNING to
 * APPLICATION_RESOURCES_CLEANINGUP on the app-finished event, fires a
 * DESTROY_APPLICATION_RESOURCES localization event, and ends FINISHED once
 * resources are cleaned up. Each container token, after being allowed to
 * expire, must still be accepted as a valid start-container request.
 */
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnCompletedContainers(){ WrappedApplication wa=null; try { wa=new WrappedApplication(5,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); reset(wa.localizerBus); wa.containerFinished(0); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(0,wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState()); verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app))); wa.appResourcesCleanedup(); for ( Container container : wa.containers) { ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the app-finished event arrives while containers are still running,
 * the application enters FINISHING_CONTAINERS_WAIT and sends a kill event
 * to each live container. Only after the last container finishes does it
 * move to APPLICATION_RESOURCES_CLEANINGUP (destroying its resources and
 * notifying aux services of APPLICATION_STOP) and finally FINISHED; expired
 * container tokens must still validate as start-container requests.
 */
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnRunningContainers(){ WrappedApplication wa=null; try { wa=new WrappedApplication(4,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(2,wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState()); assertEquals(2,wa.app.getContainers().size()); for (int i=1; i < wa.containers.size(); i++) { verify(wa.containerBus).handle(argThat(new ContainerKillMatcher(wa.containers.get(i).getContainerId()))); } wa.containerFinished(1); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState()); assertEquals(1,wa.app.getContainers().size()); reset(wa.localizerBus); wa.containerFinished(2); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState()); assertEquals(0,wa.app.getContainers().size()); verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app))); verify(wa.auxBus).handle(refEq(new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,wa.appId))); wa.appResourcesCleanedup(); for ( Container container : wa.containers) { ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.container.TestContainer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * An INIT event arriving after the container has already reached DONE must
 * be ignored: the state stays DONE and no localized resources reappear.
 */
@Test
@SuppressWarnings("unchecked")
public void testInitWhileDone() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(6, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    wrapped.localizeResources();
    wrapped.launchContainer();
    reset(wrapped.localizerBus);
    wrapped.containerSuccessful();
    wrapped.containerResourcesCleanup();
    assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    // A stray init must not move the container out of DONE.
    wrapped.initContainer();
    assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Once localization has failed, late RESOURCE_LOCALIZED events must not
 * move the container out of LOCALIZATION_FAILED, and the failure text must
 * appear in the diagnostics.
 */
@Test
public void testResourceLocalizedOnLocalizationFailed() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(16, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    // Fail roughly half of the resources, but at least one.
    int failCount = wrapped.getLocalResourceCount() / 2;
    if (failCount == 0) {
      failCount = 1;
    }
    wrapped.failLocalizeResources(failCount);
    assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    // Resources arriving after the failure are ignored.
    wrapped.localizeResourcesFromInvalidState(failCount);
    assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
    Assert.assertTrue(wrapped.getDiagnostics().contains(FAKE_LOCALIZATION_ERROR));
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A kill that arrives after the container process has already exited with
 * failure is a no-op: the state remains EXITED_WITH_FAILURE.
 */
@Test
public void testKillOnLocalizedWhenContainerLaunched() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(17, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    wrapped.localizeResources();
    assertEquals(ContainerState.LOCALIZED, wrapped.c.getContainerState());
    // Drive the launcher to completion before delivering the kill.
    ContainerLaunch launch = wrapped.launcher.running.get(wrapped.c.getContainerId());
    launch.call();
    wrapped.drainDispatcherEvents();
    assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
    wrapped.killContainer();
    assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A kill delivered while the container is LOCALIZED but before the launcher
 * runs moves the container through KILLING to
 * CONTAINER_CLEANEDUP_AFTER_KILL, and after resource cleanup the running
 * container metric drops to zero.
 */
@Test
public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(17, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    wrapped.localizeResources();
    assertEquals(ContainerState.LOCALIZED, wrapped.c.getContainerState());
    ContainerLaunch launch = wrapped.launcher.running.get(wrapped.c.getContainerId());
    // Kill first, then let the launcher run and observe the kill.
    wrapped.killContainer();
    assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
    launch.call();
    wrapped.drainDispatcherEvents();
    assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
    wrapped.c.handle(new ContainerEvent(wrapped.c.getContainerId(), ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
    assertEquals(0, metrics.getRunningContainers());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Killing a running container moves it to KILLING, releases its localized
 * resources, and once the kill completes the cleanup event is dispatched.
 */
@Test
@SuppressWarnings("unchecked")
public void testCleanupOnKillRequest() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(12, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    wrapped.localizeResources();
    wrapped.launchContainer();
    reset(wrapped.localizerBus);
    wrapped.killContainer();
    assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    wrapped.containerKilledOnRequest();
    verifyCleanupCall(wrapped);
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A second resource-failed event after the container has already entered
 * LOCALIZATION_FAILED must leave the state (and the cleared resources)
 * unchanged.
 */
@Test
public void testResourceFailedOnLocalizationFailed() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(16, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    // Pick two distinct resource keys to fail in sequence.
    Iterator resourceKeys = wrapped.localResources.keySet().iterator();
    String firstKey = resourceKeys.next();
    String secondKey = resourceKeys.next();
    wrapped.failLocalizeSpecificResource(firstKey);
    assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    wrapped.failLocalizeSpecificResource(secondKey);
    assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A container that exits successfully reaches EXITED_WITH_SUCCESS with its
 * localized resources released, and the cleanup event is dispatched.
 */
@Test
@SuppressWarnings("unchecked")
public void testCleanupOnSuccess() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(11, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    wrapped.localizeResources();
    wrapped.launchContainer();
    reset(wrapped.localizerBus);
    wrapped.containerSuccessful();
    assertEquals(ContainerState.EXITED_WITH_SUCCESS, wrapped.c.getContainerState());
    assertNull(wrapped.c.getLocalizedResources());
    verifyCleanupCall(wrapped);
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Killing a container while it is still LOCALIZING moves it to KILLING
 * with exit status KILLED_BY_RESOURCEMANAGER and a "KillRequest"
 * diagnostic.
 */
@Test
public void testKillOnLocalizing() throws Exception {
  WrappedContainer wrapped = null;
  try {
    wrapped = new WrappedContainer(14, 314159265358979L, 4344, "yak");
    wrapped.initContainer();
    assertEquals(ContainerState.LOCALIZING, wrapped.c.getContainerState());
    wrapped.killContainer();
    assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
    assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
        wrapped.c.cloneAndGetContainerStatus().getExitStatus());
    assertTrue(wrapped.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest"));
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Kill after all resources failed to localize: the container is already in LOCALIZATION_FAILED
// with null localized resources, and the kill request must not change that state; cleanup
// dispatch is still verified.
@Test public void testKillOnLocalizationFailed() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(15,314159265358979L,4344,"yak"); wc.initContainer(); wc.failLocalizeResources(wc.getLocalResourceCount()); assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.killContainer(); assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// A launch event that arrives after a kill request must be ignored: the container stays in
// KILLING (with resources already released to null) both before and after launchContainer(),
// then completes via containerKilledOnRequest(); cleanup dispatch is verified.
@Test public void testLaunchAfterKillRequest() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(14,314159265358979L,4344,"yak"); wc.initContainer(); wc.localizeResources(); wc.killContainer(); assertEquals(ContainerState.KILLING,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.launchContainer(); assertEquals(ContainerState.KILLING,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.containerKilledOnRequest(); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// A resource-localization failure that lands while the container is already KILLING must not
// change the state: KILLING and null localized resources are asserted both before and after
// failLocalizeSpecificResource(key1); cleanup dispatch is verified.
@Test public void testResourceFailedOnKilling() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(16,314159265358979L,4344,"yak"); wc.initContainer(); Iterator lRsrcKeys=wc.localResources.keySet().iterator(); String key1=lRsrcKeys.next(); wc.killContainer(); assertEquals(ContainerState.KILLING,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.failLocalizeSpecificResource(key1); assertEquals(ContainerState.KILLING,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Container process failure (FORCE_KILLED exit code) after a normal launch: state becomes
// EXITED_WITH_FAILURE, localized resources are released (null), and cleanup is dispatched on
// the localizer bus (reset before the failure so only post-launch interactions are counted).
@Test @SuppressWarnings("unchecked") public void testCleanupOnFailure() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(10,314159265358979L,4344,"yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerFailed(ExitCode.FORCE_KILLED.getExitCode()); assertEquals(ContainerState.EXITED_WITH_FAILURE,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Externally-requested kill of a running container: containerKilledOnRequest() after launch
// drives the state to EXITED_WITH_FAILURE with localized resources released (null), and the
// cleanup dispatch is verified on the (reset) localizer bus.
@Test @SuppressWarnings("unchecked") public void testExternalKill() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(13,314159265358979L,4344,"yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerKilledOnRequest(); assertEquals(ContainerState.EXITED_WITH_FAILURE,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// A resource-failed event delivered after the container has already reached DONE (success +
// resource cleanup) must be ignored: DONE state and null localized resources are asserted both
// before and after resourceFailedContainer(); cleanup dispatch is verified.
@Test @SuppressWarnings("unchecked") public void testLocalizationFailureAtDone() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(6,314159265358979L,4344,"yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerSuccessful(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.resourceFailedContainer(); assertEquals(ContainerState.DONE,wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Kill in the NEW state (before initContainer): the container transitions straight to DONE,
// the cloned status carries exit code KILLED_BY_RESOURCEMANAGER, and diagnostics contain
// "KillRequest".
@Test public void testKillOnNew() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(13,314159265358979L,4344,"yak"); assertEquals(ContainerState.NEW,wc.c.getContainerState()); wc.killContainer(); assertEquals(ContainerState.DONE,wc.c.getContainerState()); assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,wc.c.cloneAndGetContainerStatus().getExitStatus()); assertTrue(wc.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest")); } finally { if (wc != null) { wc.finished(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Happy-path localization + launch: NEW -> init -> localizeResources() -> LOCALIZED; every
// localized path is removed from the returned localPaths map (so both sides match exactly and
// the map drains to empty), then a custom ArgumentMatcher verifies the launcher bus received a
// ContainersLauncherEvent carrying this exact container instance.
/** * Verify container launch when all resources already cached. */ @Test public void testLocalizationLaunch() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(8,314159265358979L,4344,"yak"); assertEquals(ContainerState.NEW,wc.c.getContainerState()); wc.initContainer(); Map> localPaths=wc.localizeResources(); assertEquals(ContainerState.LOCALIZED,wc.c.getContainerState()); assertNotNull(wc.c.getLocalizedResources()); for ( Entry> loc : wc.c.getLocalizedResources().entrySet()) { assertEquals(localPaths.remove(loc.getKey()),loc.getValue()); } assertTrue(localPaths.isEmpty()); final WrappedContainer wcf=wc; ArgumentMatcher matchesContainerLaunch=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ContainersLauncherEvent launchEvent=(ContainersLauncherEvent)o; return wcf.c == launchEvent.getContainer(); } } ; verify(wc.launcherBus).handle(argThat(matchesContainerLaunch)); } finally { if (wc != null) { wc.finished(); } } }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Writes a launch script whose env contains embedded newlines (invalid shell syntax), executes
// it with LANG=C (to pin the error-message locale), and asserts the failure diagnostics contain
// the platform-specific "command not found" text and a nonzero exit code. The temp script is
// deleted in finally.
@Test(timeout=20000) public void testInvalidEnvSyntaxDiagnostics() throws IOException { File shellFile=null; try { shellFile=Shell.appendScriptExtension(tmpDir,"hello"); Map> resources=new HashMap>(); FileOutputStream fos=new FileOutputStream(shellFile); FileUtil.setExecutable(shellFile,true); Map env=new HashMap(); env.put("APPLICATION_WORKFLOW_CONTEXT","{\"workflowId\":\"609f91c5cd83\"," + "\"workflowName\":\"\n\ninsert table " + "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, "); List commands=new ArrayList(); ContainerLaunch.writeLaunchEnv(fos,env,resources,commands); fos.flush(); fos.close(); Map cmdEnv=new HashMap(); cmdEnv.put("LANG","C"); Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir,cmdEnv); String diagnostics=null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch ( ExitCodeException e) { diagnostics=e.getMessage(); } Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? "is not recognized as an internal or external command" : "command not found")); Assert.assertTrue(shexc.getExitCode() != 0); } finally { if (shellFile != null && shellFile.exists()) { shellFile.delete(); } } }

UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
// Windows-only (Assume guards it): mkdir paths at exactly the 8191-char cmd.exe limit succeed,
// one char over must throw IOException containing expectedMessage.
// NOTE(review): "WINDOWS_MAX_SHELL_LENGHT" is the actual (misspelled) Hadoop constant name —
// do not "fix" the spelling here.
@Test(timeout=10000) public void testWindowsShellScriptBuilderMkdir() throws IOException { String mkDirCmd="@if not exist \"\" mkdir \"\""; Assume.assumeTrue(Shell.WINDOWS); assertEquals(8191,Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder=ShellScriptBuilder.create(); builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A",1024))); builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("E",(Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2))); try { builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("X",(Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2 + 1))); fail("long mkdir was expected to throw"); } catch ( IOException e) { assertThat(e.getMessage(),containsString(expectedMessage)); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Builds a launch script that symlinks a non-existent source path (shellFile path + "randomPath")
// and then invokes the link; execution must fail with a nonzero exit code and non-null
// diagnostics. All three temp files (script, wrapper, symlink) are deleted in finally.
@Test(timeout=20000) public void testInvalidSymlinkDiagnostics() throws IOException { File shellFile=null; File tempFile=null; String symLink=Shell.WINDOWS ? "test.cmd" : "test"; File symLinkFile=null; try { shellFile=Shell.appendScriptExtension(tmpDir,"hello"); tempFile=Shell.appendScriptExtension(tmpDir,"temp"); String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\""; PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile,true); writer.println(timeoutCommand); writer.close(); Map> resources=new HashMap>(); Path invalidPath=new Path(shellFile.getAbsolutePath() + "randomPath"); resources.put(invalidPath,Arrays.asList(symLink)); FileOutputStream fos=new FileOutputStream(tempFile); Map env=new HashMap(); List commands=new ArrayList(); if (Shell.WINDOWS) { commands.add("cmd"); commands.add("/c"); commands.add("\"" + symLink + "\""); } else { commands.add("/bin/sh ./\\\"" + symLink + "\\\""); } ContainerLaunch.writeLaunchEnv(fos,env,resources,commands); fos.flush(); fos.close(); FileUtil.setExecutable(tempFile,true); Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir); String diagnostics=null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch ( ExitCodeException e) { diagnostics=e.getMessage(); } Assert.assertNotNull(diagnostics); Assert.assertTrue(shexc.getExitCode() != 0); symLinkFile=new File(tmpDir,symLink); } finally { if (shellFile != null && shellFile.exists()) { shellFile.delete(); } if (tempFile != null && tempFile.exists()) { tempFile.delete(); } if (symLinkFile != null && symLinkFile.exists()) { symLinkFile.delete(); } } }

UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
// Windows-only: winutils symlink commands at exactly the 8191-char cmd.exe limit are accepted;
// one char over must throw IOException containing expectedMessage. Mirrors the mkdir test but
// for ShellScriptBuilder.link(src, dst).
@Test(timeout=10000) public void testWindowsShellScriptBuilderLink() throws IOException { Assume.assumeTrue(Shell.WINDOWS); String linkCmd="@" + Shell.WINUTILS + " symlink \"\" \"\""; assertEquals(8191,Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder=ShellScriptBuilder.create(); builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("A",1024)),new Path(org.apache.commons.lang.StringUtils.repeat("B",1024))); builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("E",(Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2)),new Path(org.apache.commons.lang.StringUtils.repeat("F",(Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2))); try { builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("X",(Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2 + 1)),new Path(org.apache.commons.lang.StringUtils.repeat("Y",(Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2) + 1)); fail("long link was expected to throw"); } catch ( IOException e) { assertThat(e.getMessage(),containsString(expectedMessage)); } }

UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
// Windows-only: ShellScriptBuilder.command() length limits — a single argument at exactly
// (8191 - "@call ") chars is accepted and one char over throws; then the same boundary is
// exercised with multi-argument commands whose joined length straddles the limit.
@Test(timeout=10000) public void testWindowsShellScriptBuilderCommand() throws IOException { String callCmd="@call "; Assume.assumeTrue(Shell.WINDOWS); assertEquals(8191,Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder=ShellScriptBuilder.create(); builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("A",1024))); builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("E",Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length()))); try { builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("X",Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length() + 1))); fail("longCommand was expected to throw"); } catch ( IOException e) { assertThat(e.getMessage(),containsString(expectedMessage)); } builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("A",1024),org.apache.commons.lang.StringUtils.repeat("A",1024),org.apache.commons.lang.StringUtils.repeat("A",1024))); builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("E",4095),org.apache.commons.lang.StringUtils.repeat("E",2047),org.apache.commons.lang.StringUtils.repeat("E",2047 - callCmd.length()))); try { builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat("X",4095),org.apache.commons.lang.StringUtils.repeat("X",2047),org.apache.commons.lang.StringUtils.repeat("X",2048 - callCmd.length()))); fail("long commands was expected to throw"); } catch ( IOException e) { assertThat(e.getMessage(),containsString(expectedMessage)); } }

BooleanVerifier EqualityVerifier HybridVerifier 
// A mocked Container whose getLocalizedResources() returns null: ContainerLaunch.call() must
// not NPE but instead publish a ContainerExitEvent of type CONTAINER_EXITED_WITH_FAILURE; the
// assertions live inside the stubbed EventHandler that receives the dispatched event.
// NOTE(review): if no event is dispatched the handler's asserts never run — the test would pass
// vacuously; confirm upstream whether a dispatch-count verification was intended.
@SuppressWarnings("rawtypes") @Test(timeout=10000) public void testCallFailureWithNullLocalizedResources(){ Container container=mock(Container.class); when(container.getContainerId()).thenReturn(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(System.currentTimeMillis(),1),1),1)); ContainerLaunchContext clc=mock(ContainerLaunchContext.class); when(clc.getCommands()).thenReturn(Collections.emptyList()); when(container.getLaunchContext()).thenReturn(clc); when(container.getLocalizedResources()).thenReturn(null); Dispatcher dispatcher=mock(Dispatcher.class); EventHandler eventHandler=new EventHandler(){ public void handle( Event event){ Assert.assertTrue(event instanceof ContainerExitEvent); ContainerExitEvent exitEvent=(ContainerExitEvent)event; Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,exitEvent.getType()); } } ; when(dispatcher.getEventHandler()).thenReturn(eventHandler); ContainerLaunch launch=new ContainerLaunch(context,new Configuration(),dispatcher,exec,null,container,dirsHandler,containerManager); launch.call(); }

UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
// Windows-only: env-variable values sized so "@set somekey=<value>" hits exactly the 8191-char
// cmd.exe limit are accepted; one char over must throw IOException containing expectedMessage.
@Test(timeout=10000) public void testWindowsShellScriptBuilderEnv() throws IOException { Assume.assumeTrue(Shell.WINDOWS); assertEquals(8191,Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder=ShellScriptBuilder.create(); builder.env("somekey",org.apache.commons.lang.StringUtils.repeat("A",1024)); builder.env("somekey",org.apache.commons.lang.StringUtils.repeat("A",Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length())); try { builder.env("somekey",org.apache.commons.lang.StringUtils.repeat("A",Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()) + 1); fail("long env was expected to throw"); } catch ( IOException e) { assertThat(e.getMessage(),containsString(expectedMessage)); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Launch script that echoes "hello" to stdout, "error" to stderr, and exits 2: the
// ExitCodeException diagnostics must contain the stderr text, the executor output the stdout
// text, and the exit code must be exactly 2. Script file is appended to (launch env written
// after the echo commands) and deleted in finally.
@Test(timeout=20000) public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException { File shellFile=null; try { shellFile=Shell.appendScriptExtension(tmpDir,"hello"); String command=Shell.WINDOWS ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : "echo \"hello\"; echo \"error\" 1>&2; exit 2;"; PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile,true); writer.println(command); writer.close(); Map> resources=new HashMap>(); FileOutputStream fos=new FileOutputStream(shellFile,true); Map env=new HashMap(); List commands=new ArrayList(); commands.add(command); ContainerLaunch.writeLaunchEnv(fos,env,resources,commands); fos.flush(); fos.close(); Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir); String diagnostics=null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch ( ExitCodeException e) { diagnostics=e.getMessage(); } Assert.assertTrue(diagnostics.contains("error")); Assert.assertTrue(shexc.getOutput().contains("hello")); Assert.assertTrue(shexc.getExitCode() == 2); } finally { if (shellFile != null && shellFile.exists()) { shellFile.delete(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// End-to-end check that sanitizeEnv overrides user-supplied values for reserved NM env vars
// (CONTAINER_ID, NM_HOST, NM_PORT, NM_HTTP_PORT, LOCAL_DIRS, USER, LOGNAME, PWD, HOME): the
// launched script dumps each var (plus aux-service data and its own pid) to env_vars.txt, which
// is then read back line-by-line and compared against the node/context values; the same values
// are also asserted on containerLaunchContext.getEnvironment(). Finally the container is
// stopped and must report KILLED_BY_APPMASTER with the process no longer alive.
// NOTE(review): the statement text below spans multiple physical lines mid-token — an artifact
// of how this corpus was flattened; the breaks are preserved verbatim.
/** * See if environment variable is forwarded using sanitizeEnv. * @throws Exception */ @Test(timeout=60000) public void testContainerEnvVariables() throws Exception { containerManager.start(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); ApplicationId appId=ApplicationId.newInstance(0,0); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cId=ContainerId.newInstance(appAttemptId,0); Map userSetEnv=new HashMap(); userSetEnv.put(Environment.CONTAINER_ID.name(),"user_set_container_id"); userSetEnv.put(Environment.NM_HOST.name(),"user_set_NM_HOST"); userSetEnv.put(Environment.NM_PORT.name(),"user_set_NM_PORT"); userSetEnv.put(Environment.NM_HTTP_PORT.name(),"user_set_NM_HTTP_PORT"); userSetEnv.put(Environment.LOCAL_DIRS.name(),"user_set_LOCAL_DIR"); userSetEnv.put(Environment.USER.key(),"user_set_" + Environment.USER.key()); userSetEnv.put(Environment.LOGNAME.name(),"user_set_LOGNAME"); userSetEnv.put(Environment.PWD.name(),"user_set_PWD"); userSetEnv.put(Environment.HOME.name(),"user_set_HOME"); containerLaunchContext.setEnvironment(userSetEnv); File scriptFile=Shell.appendScriptExtension(tmpDir,"scriptFile"); PrintWriter fileWriter=new PrintWriter(scriptFile); File processStartFile=new File(tmpDir,"env_vars.txt").getAbsoluteFile(); if (Shell.WINDOWS) { fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.USER.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> "+ processStartFile); fileWriter.println("@echo " + 
Environment.PWD.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.HOME.$() + ">> "+ processStartFile); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ "%>> "+ processStartFile); } fileWriter.println("@echo " + cId + ">> "+ processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { fileWriter.write("\numask 0"); fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.USER.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.PWD.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.HOME.name() + " >> "+ processStartFile); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ " >> "+ processStartFile); } fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } fileWriter.close(); URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String 
destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List commands=Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,Priority.newInstance(0),0)); List list=new ArrayList(); list.add(scRequest); StartContainersRequest allRequests=StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs=0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists()); List localDirs=dirsHandler.getLocalDirs(); List logDirs=dirsHandler.getLogDirs(); List appDirs=new ArrayList(localDirs.size()); for ( String localDir : localDirs) { Path usersdir=new Path(localDir,ContainerLocalizer.USERCACHE); Path userdir=new Path(usersdir,user); Path appsdir=new Path(userdir,ContainerLocalizer.APPCACHE); appDirs.add(new Path(appsdir,appId.toString())); } List containerLogDirs=new ArrayList(); String relativeContainerLogDir=ContainerLaunch.getRelativeContainerLogDir(appId.toString(),cId.toString()); for ( String logDir : logDirs) { containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir); } BufferedReader reader=new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals(cId.toString(),reader.readLine()); Assert.assertEquals(context.getNodeId().getHost(),reader.readLine()); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),reader.readLine()); Assert.assertEquals(String.valueOf(HTTP_PORT),reader.readLine()); Assert.assertEquals(StringUtils.join(",",appDirs),reader.readLine()); Assert.assertEquals(user,reader.readLine()); Assert.assertEquals(user,reader.readLine()); String 
obtainedPWD=reader.readLine(); boolean found=false; for ( Path localDir : appDirs) { if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) { found=true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found); Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),reader.readLine()); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName),ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes()))); } Assert.assertEquals(cId.toString(),containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name())); Assert.assertEquals(context.getNodeId().getHost(),containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name())); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name())); Assert.assertEquals(String.valueOf(HTTP_PORT),containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name())); Assert.assertEquals(StringUtils.join(",",appDirs),containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name())); Assert.assertEquals(StringUtils.join(",",containerLogDirs),containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name())); Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.USER.name())); Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name())); found=false; obtainedPWD=containerLaunchContext.getEnvironment().get(Environment.PWD.name()); for ( Path localDir : appDirs) { if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) { found=true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found); 
Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),containerLaunchContext.getEnvironment().get(Environment.HOME.name())); String pid=reader.readLine().trim(); Assert.assertEquals(null,reader.readLine()); Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid)); Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid)); List containerIds=new ArrayList(); containerIds.add(cId); StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds); containerManager.stopContainers(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE); GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); int expectedExitCode=ContainerExitStatus.KILLED_BY_APPMASTER; Assert.assertEquals(expectedExitCode,containerStatus.getExitStatus()); Assert.assertFalse("Process is still alive!",DefaultContainerExecutor.containerIsAlive(pid)); }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestLocalCacheDirectoryManager

NullVerifier EqualityVerifier HybridVerifier 
// Configuring the per-directory cache file limit below the minimum (1 < 37) must make
// ResourceLocalizationService.init throw a YarnRuntimeException with the "less than 37" message.
// NOTE(review): the message assertEquals has (actual, expected) swapped relative to JUnit
// convention — harmless for the pass/fail result but failure output would be inverted.
@Test(timeout=10000) public void testMinimumPerDirectoryFileLimit(){ YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"1"); Exception e=null; ResourceLocalizationService service=new ResourceLocalizationService(null,null,null,null,null); try { service.init(conf); } catch ( Exception e1) { e=e1; } Assert.assertNotNull(e); Assert.assertEquals(YarnRuntimeException.class,e.getClass()); Assert.assertEquals(e.getMessage(),YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY + " parameter is configured with a value less than 37."); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises the hierarchical sub-directory naming scheme of LocalCacheDirectoryManager with the
// per-dir limit at 37: the first path is empty (root), and each subsequent call yields a
// base-36-derived relative path, checked for all 37*36*36 slots; decrementing the file count on
// "4" then "2" must make those exact paths the next two returned for localization.
@Test(timeout=10000) public void testHierarchicalSubDirectoryCreation(){ YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37"); LocalCacheDirectoryManager hDir=new LocalCacheDirectoryManager(conf); Assert.assertTrue(hDir.getRelativePathForLocalization().isEmpty()); for (int i=1; i <= 37 * 36 * 36; i++) { StringBuffer sb=new StringBuffer(); String num=Integer.toString(i - 1,36); if (num.length() == 1) { sb.append(num.charAt(0)); } else { sb.append(Integer.toString(Integer.parseInt(num.substring(0,1),36) - 1,36)); } for (int j=1; j < num.length(); j++) { sb.append(Path.SEPARATOR).append(num.charAt(j)); } Assert.assertEquals(sb.toString(),hDir.getRelativePathForLocalization()); } String testPath1="4"; String testPath2="2"; hDir.decrementFileCountForPath(testPath1); hDir.decrementFileCountForPath(testPath2); Assert.assertEquals(testPath1,hDir.getRelativePathForLocalization()); Assert.assertEquals(testPath2,hDir.getRelativePathForLocalization()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies incrementFileCountForPath/decrementFileCountForPath bookkeeping: pre-incrementing
// root consumes one of its (DIRECTORIES_PER_LEVEL + 2) slots so root fills one call early;
// decrements reopen root slots; incrementing an unknown deep path ("d/e/e/p/0") registers that
// subtree so subsequent localizations hand out the deep siblings in order, skipping the ones
// whose counts were pre-incremented.
@Test public void testIncrementFileCountForPath(){ YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2); LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf); final String rootPath=""; mgr.incrementFileCountForPath(rootPath); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization())); mgr.getRelativePathForLocalization(); mgr.decrementFileCountForPath(rootPath); mgr.decrementFileCountForPath(rootPath); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); String otherDir=mgr.getRelativePathForLocalization(); Assert.assertFalse("root dir should be full",otherDir.equals(rootPath)); final String deepDir0="d/e/e/p/0"; final String deepDir1="d/e/e/p/1"; final String deepDir2="d/e/e/p/2"; final String deepDir3="d/e/e/p/3"; mgr.incrementFileCountForPath(deepDir0); Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization()); Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization()); Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization()); mgr.incrementFileCountForPath(deepDir2); mgr.incrementFileCountForPath(deepDir1); mgr.incrementFileCountForPath(deepDir2); Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization()); }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestLocalResourcesTrackerImpl

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// State-store interaction for a successful localization: getPathForLocalization must trigger
// startResourceLocalization (captured proto/path round-trip back to the original request and
// parent dir), ResourceLocalizedEvent must trigger finishResourceLocalization with matching
// resource + local path, and removing the localized resource must call removeLocalizedResource.
// The DrainDispatcher is always stopped in finally.
// NOTE(review): the statement text spans two physical lines mid-token ("ArgumentCaptor /
// localizedProtoCaptor") — a flattening artifact, preserved verbatim.
@Test @SuppressWarnings("unchecked") public void testStateStoreSuccessfulLocalization() throws Exception { final String user="someuser"; final ApplicationId appId=ApplicationId.newInstance(1,1); final Path localDir=new Path("/tmp"); Configuration conf=new YarnConfiguration(); DrainDispatcher dispatcher=null; dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); DeletionService mockDelService=mock(DeletionService.class); NMStateStoreService stateStore=mock(NMStateStoreService.class); try { LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.APPLICATION,lc1); tracker.handle(reqEvent1); dispatcher.await(); Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir); ArgumentCaptor localResourceCaptor=ArgumentCaptor.forClass(LocalResourceProto.class); ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class); verify(stateStore).startResourceLocalization(eq(user),eq(appId),localResourceCaptor.capture(),pathCaptor.capture()); LocalResourceProto lrProto=localResourceCaptor.getValue(); Path localizedPath1=pathCaptor.getValue(); Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(lrProto))); Assert.assertEquals(hierarchicalPath1,localizedPath1.getParent()); ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,pathCaptor.getValue(),120); tracker.handle(rle1); dispatcher.await(); ArgumentCaptor 
localizedProtoCaptor=ArgumentCaptor.forClass(LocalizedResourceProto.class); verify(stateStore).finishResourceLocalization(eq(user),eq(appId),localizedProtoCaptor.capture()); LocalizedResourceProto localizedProto=localizedProtoCaptor.getValue(); Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(localizedProto.getResource()))); Assert.assertEquals(localizedPath1.toString(),localizedProto.getLocalPath()); LocalizedResource localizedRsrc1=tracker.getLocalizedResource(lr1); Assert.assertNotNull(localizedRsrc1); tracker.handle(new ResourceReleaseEvent(lr1,cId1)); dispatcher.await(); boolean removeResult=tracker.remove(localizedRsrc1,mockDelService); Assert.assertTrue(removeResult); verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(localizedPath1)); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises basic reference counting in LocalResourcesTrackerImpl with two
 * containers and two PUBLIC resources: requests raise refcounts, a resource
 * with outstanding references (or still DOWNLOADING) cannot be removed, and
 * once LOCALIZED and fully released it can.
 */
@Test(timeout=10000)
@SuppressWarnings("unchecked")
public void test() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler localizerEventHandler = mock(EventHandler.class);
    EventHandler containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    DeletionService mockDelService = mock(DeletionService.class);
    // Two containers and two pre-created resources seeded into the tracker's
    // backing map.
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2);
    LocalizerContext lc2 = new LocalizerContext(user, cId2, null);
    LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.PUBLIC);
    LocalResourceRequest req2 = createLocalResourceRequest(user, 2, 1,
        LocalResourceVisibility.PUBLIC);
    LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
    LocalizedResource lr2 = createLocalizedResource(req2, dispatcher);
    ConcurrentMap localrsrc = new ConcurrentHashMap();
    localrsrc.put(req1, lr1);
    localrsrc.put(req2, lr2);
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, false, conf, new NMNullStateStoreService());
    ResourceEvent req11Event = new ResourceRequestEvent(req1,
        LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent req12Event = new ResourceRequestEvent(req1,
        LocalResourceVisibility.PUBLIC, lc2);
    ResourceEvent req21Event = new ResourceRequestEvent(req2,
        LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1);
    ResourceEvent rel12Event = new ResourceReleaseEvent(req1, cId2);
    ResourceEvent rel21Event = new ResourceReleaseEvent(req2, cId1);
    // Three requests: req1 from both containers, req2 from container 1 only.
    tracker.handle(req11Event);
    tracker.handle(req12Event);
    tracker.handle(req21Event);
    dispatcher.await();
    verify(localizerEventHandler, times(3)).handle(
        any(LocalizerResourceRequestEvent.class));
    Assert.assertEquals(2, lr1.getRefCount());
    Assert.assertEquals(1, lr2.getRefCount());
    // Releasing req2 leaves both resources tracked.
    tracker.handle(rel21Event);
    dispatcher.await();
    verifyTrackedResourceCount(tracker, 2);
    // lr1 still has references, so removal must fail and nothing changes.
    Assert.assertEquals(2, lr1.getRefCount());
    Assert.assertFalse(tracker.remove(lr1, mockDelService));
    verifyTrackedResourceCount(tracker, 2);
    // Mark lr1 LOCALIZED, release all references, then removal succeeds.
    ResourceLocalizedEvent rle = new ResourceLocalizedEvent(req1,
        new Path("file:///tmp/r1"), 1);
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    tracker.handle(rel11Event);
    tracker.handle(rel12Event);
    Assert.assertEquals(0, lr1.getRefCount());
    Assert.assertTrue(tracker.remove(lr1, mockDelService));
    verifyTrackedResourceCount(tracker, 1);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that recovered resources are accounted for by the per-local-dir
 * LocalCacheDirectoryManager: each recovery increments the file count of the
 * hierarchy subdirectory ("", "4", "4/2", "4/3") its path falls under, and
 * counts of other subdirectories are unaffected.
 */
@Test
@SuppressWarnings("unchecked")
public void testRecoveredResourceWithDirCacheMgr() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDirRoot = new Path("/tmp/localdir");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  EventHandler localizerEventHandler = mock(EventHandler.class);
  EventHandler containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    // Tracker created with the directory-cache manager enabled (4th arg true)
    // so recovered paths feed the directory counts.
    LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user,
        appId, dispatcher, true, conf, stateStore);
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr1));
    // First recovered resource under subdirectory "4/2".
    final long localizedId1 = 52;
    Path hierarchicalPath1 = new Path(localDirRoot + "/4/2",
        Long.toString(localizedId1));
    Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr1));
    LocalCacheDirectoryManager dirMgrRoot =
        tracker.getDirectoryManager(localDirRoot);
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/2").getCount());
    // Second recovered resource in the same subdirectory bumps its count to 2.
    LocalResourceRequest lr2 = createLocalResourceRequest(user, 2, 2,
        LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr2));
    final long localizedId2 = localizedId1 + 1;
    Path hierarchicalPath2 = new Path(localDirRoot + "/4/2",
        Long.toString(localizedId2));
    Path localizedPath2 = new Path(hierarchicalPath2, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr2, localizedPath2, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr2));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    // Third recovered resource lands in sibling subdirectory "4/3".
    LocalResourceRequest lr3 = createLocalResourceRequest(user, 3, 3,
        LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr3));
    final long localizedId3 = 128;
    Path hierarchicalPath3 = new Path(localDirRoot + "/4/3",
        Long.toString(localizedId3));
    Path localizedPath3 = new Path(hierarchicalPath3, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr3, localizedPath3, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr3));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount());
    // Fourth recovered resource lands in the intermediate directory "4".
    LocalResourceRequest lr4 = createLocalResourceRequest(user, 4, 4,
        LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr4));
    final long localizedId4 = 256;
    Path hierarchicalPath4 = new Path(localDirRoot + "/4",
        Long.toString(localizedId4));
    Path localizedPath4 = new Path(hierarchicalPath4, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr4, localizedPath4, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr4));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount());
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that recovering a resource advances the tracker's unique-id
 * counter: after recovering a resource localized under id 52, the next path
 * handed out for a fresh localization must use id 53.
 */
@Test
@SuppressWarnings("unchecked")
public void testRecoveredResource() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDir = new Path("/tmp/localdir");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  EventHandler localizerEventHandler = mock(EventHandler.class);
  EventHandler containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId,
        dispatcher, false, conf, stateStore);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    // Recover lr1 at a path whose parent directory name encodes id 52.
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.APPLICATION);
    Assert.assertNull(tracker.getLocalizedResource(lr1));
    final long localizedId1 = 52;
    Path hierarchicalPath1 = new Path(localDir, Long.toString(localizedId1));
    Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr1));
    // A brand-new request must be assigned the next id after the recovered
    // one (52 + 1 = 53), proving the counter was restored from recovery.
    LocalResourceRequest lr2 = createLocalResourceRequest(user, 2, 2,
        LocalResourceVisibility.APPLICATION);
    LocalizerContext lc2 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent2 = new ResourceRequestEvent(lr2,
        LocalResourceVisibility.APPLICATION, lc2);
    tracker.handle(reqEvent2);
    dispatcher.await();
    Path hierarchicalPath2 = tracker.getPathForLocalization(lr2, localDir);
    long localizedId2 = Long.parseLong(hierarchicalPath2.getName());
    Assert.assertEquals(localizedId1 + 1, localizedId2);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the localized-resource cache lifecycle: requests raise refcounts, a
 * localization failure notifies every referencing container and evicts the
 * cache entry, stale releases from the failed era are harmless, the resource
 * can be re-requested, and a subsequent successful localization notifies the
 * remaining container.
 */
// Timeout raised from 1000 ms: this test performs many dispatcher round
// trips (handle + await), which can exceed one second on a loaded machine
// and made the 1 s limit flaky; sibling tests in this class use 10000 ms+.
@Test(timeout=10000)
@SuppressWarnings("unchecked")
public void testLocalResourceCache() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler localizerEventHandler = mock(EventHandler.class);
    EventHandler containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    // Type parameters restored (they were missing, leaving raw types that do
    // not compile against the get(lr).getRefCount() calls below).
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, true, conf, new NMNullStateStoreService());
    LocalResourceRequest lr = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.PUBLIC);
    // First container requests the resource: one cache entry, refcount 1,
    // state DOWNLOADING.
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 = new ResourceRequestEvent(lr,
        LocalResourceVisibility.PRIVATE, lc1);
    Assert.assertEquals(0, localrsrc.size());
    tracker.handle(reqEvent1);
    dispatcher.await();
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1));
    Assert.assertEquals(ResourceState.DOWNLOADING,
        localrsrc.get(lr).getState());
    // Second container references the same cached entry: refcount 2.
    ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2);
    LocalizerContext lc2 = new LocalizerContext(user, cId2, null);
    ResourceEvent reqEvent2 = new ResourceRequestEvent(lr,
        LocalResourceVisibility.PRIVATE, lc2);
    tracker.handle(reqEvent2);
    dispatcher.await();
    Assert.assertEquals(2, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2));
    // Localization failure: both referencing containers must be notified and
    // the entry evicted from the cache.
    ResourceEvent resourceFailedEvent = new ResourceFailedLocalizationEvent(lr,
        (new Exception("test").getMessage()));
    LocalizedResource localizedResource = localrsrc.get(lr);
    tracker.handle(resourceFailedEvent);
    dispatcher.await();
    Assert.assertEquals(0, localrsrc.size());
    verify(containerEventHandler, times(2)).handle(
        isA(ContainerResourceFailedEvent.class));
    Assert.assertEquals(ResourceState.FAILED, localizedResource.getState());
    // Release from a container of the failed era is a harmless no-op.
    ResourceReleaseEvent relEvent1 = new ResourceReleaseEvent(lr, cId1);
    tracker.handle(relEvent1);
    dispatcher.await();
    // A new container can re-request the resource after the failure; a fresh
    // entry is created with refcount 1.
    ContainerId cId3 = BuilderUtils.newContainerId(1, 1, 1, 3);
    LocalizerContext lc3 = new LocalizerContext(user, cId3, null);
    ResourceEvent reqEvent3 = new ResourceRequestEvent(lr,
        LocalResourceVisibility.PRIVATE, lc3);
    tracker.handle(reqEvent3);
    dispatcher.await();
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
    // Stale release from cId2 (failed era) must not disturb the new entry.
    ResourceReleaseEvent relEvent2 = new ResourceReleaseEvent(lr, cId2);
    tracker.handle(relEvent2);
    dispatcher.await();
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
    // Successful localization notifies the single remaining container.
    Path localizedPath = new Path("/tmp/file1");
    ResourceLocalizedEvent localizedEvent = new ResourceLocalizedEvent(lr,
        localizedPath, 123L);
    tracker.handle(localizedEvent);
    dispatcher.await();
    verify(containerEventHandler, times(1)).handle(
        isA(ContainerResourceLocalizedEvent.class));
    Assert.assertEquals(ResourceState.LOCALIZED, localrsrc.get(lr).getState());
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    // Final release drops the refcount to zero.
    ResourceReleaseEvent relEvent3 = new ResourceReleaseEvent(lr, cId3);
    tracker.handle(relEvent3);
    dispatcher.await();
    Assert.assertEquals(0, localrsrc.get(lr).getRefCount());
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests hierarchical local-cache directory allocation with a small
 * per-directory file limit (37), including reuse of a slot freed by a failed
 * localization, plus iteration over tracked resources and removal of the one
 * whose refcount has dropped to zero.
 */
@Test(timeout=100000)
@SuppressWarnings("unchecked")
public void testHierarchicalLocalCacheDirectories() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    // Small per-directory cap forces the tracker to create hierarchical
    // sub-directories quickly.
    conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37");
    dispatcher = createDispatcher(conf);
    EventHandler localizerEventHandler = mock(EventHandler.class);
    EventHandler containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    DeletionService mockDelService = mock(DeletionService.class);
    ConcurrentMap localrsrc = new ConcurrentHashMap();
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, true, conf, new NMNullStateStoreService());
    Path localDir = new Path("/tmp");
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    // lr1: request, get a hierarchical path, localize successfully.
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.PUBLIC);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1,
        LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent1);
    Path hierarchicalPath1 =
        tracker.getPathForLocalization(lr1, localDir).getParent();
    ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(lr1,
        new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR
            + "file1"), 120);
    tracker.handle(rle1);
    // lr2: request a path, then fail localization (frees its dir slot).
    LocalResourceRequest lr2 = createLocalResourceRequest(user, 3, 3,
        LocalResourceVisibility.PUBLIC);
    ResourceEvent reqEvent2 = new ResourceRequestEvent(lr2,
        LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent2);
    Path hierarchicalPath2 =
        tracker.getPathForLocalization(lr2, localDir).getParent();
    ResourceFailedLocalizationEvent rfe2 =
        new ResourceFailedLocalizationEvent(lr2,
            new Exception("Test").toString());
    tracker.handle(rfe2);
    // NOTE(review): assertNotSame compares object identity; two distinct
    // Path instances would pass even with equal values — presumably intended
    // to show lr2 got a different directory. Confirm assertNotEquals wasn't
    // meant here.
    Assert.assertNotSame(hierarchicalPath1, hierarchicalPath2);
    // lr3: should be allocated into the "0" sub-directory under path1.
    LocalResourceRequest lr3 = createLocalResourceRequest(user, 2, 2,
        LocalResourceVisibility.PUBLIC);
    ResourceEvent reqEvent3 = new ResourceRequestEvent(lr3,
        LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent3);
    Path hierarchicalPath3 =
        tracker.getPathForLocalization(lr3, localDir).getParent();
    ResourceLocalizedEvent rle3 = new ResourceLocalizedEvent(lr3,
        new Path(hierarchicalPath3.toUri().toString() + Path.SEPARATOR
            + "file3"), 120);
    tracker.handle(rle3);
    Assert.assertEquals(hierarchicalPath3.toUri().toString(),
        hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0");
    // Release lr1 so its refcount drops to zero; failed lr2 is already gone,
    // leaving two tracked resources.
    ResourceEvent relEvent1 = new ResourceReleaseEvent(lr1, cId1);
    tracker.handle(relEvent1);
    int resources = 0;
    Iterator iter = tracker.iterator();
    while (iter.hasNext()) {
      iter.next();
      resources++;
    }
    Assert.assertEquals(2, resources);
    // Remove the zero-refcount resource (lr1); exactly one should remain.
    iter = tracker.iterator();
    while (iter.hasNext()) {
      LocalizedResource rsrc = iter.next();
      if (rsrc.getRefCount() == 0) {
        Assert.assertTrue(tracker.remove(rsrc, mockDelService));
        resources--;
      }
    }
    Assert.assertEquals(1, resources);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the tracker detects an inconsistent (deleted-on-disk) localized
 * resource: after the backing file is removed and the resource is requested
 * again, the tracker must hand out a fresh LocalizedResource instance rather
 * than the stale one.
 */
@Test(timeout=10000)
@SuppressWarnings("unchecked")
public void testConsistency() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler localizerEventHandler = mock(EventHandler.class);
    EventHandler containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.PUBLIC);
    LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
    // Type parameters restored on the seeded cache map (raw types before).
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    localrsrc.put(req1, lr1);
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, false, conf, new NMNullStateStoreService());
    ResourceEvent req11Event = new ResourceRequestEvent(req1,
        LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1);
    tracker.handle(req11Event);
    dispatcher.await();
    Assert.assertEquals(1, lr1.getRefCount());
    dispatcher.await();
    verifyTrackedResourceCount(tracker, 1);
    // Localize the resource and create its backing file on local disk.
    ResourceLocalizedEvent rle = new ResourceLocalizedEvent(req1,
        new Path("file:///tmp/r1"), 1);
    lr1.handle(rle);
    // assertEquals instead of assertTrue(x.equals(y)): reports both values
    // on failure.
    Assert.assertEquals(ResourceState.LOCALIZED, lr1.getState());
    Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1")));
    LocalizedResource rsrcbefore = tracker.iterator().next();
    File resFile = new File(lr1.getLocalPath().toUri().getRawPath()
        .toString());
    Assert.assertTrue(resFile.exists());
    // Delete the file behind the tracker's back to make the cached entry
    // inconsistent with the disk state.
    Assert.assertTrue(resFile.delete());
    // Re-request: the tracker should detect the missing file and replace the
    // stale entry with a fresh LocalizedResource instance.
    tracker.handle(req11Event);
    dispatcher.await();
    lr1.handle(rle);
    Assert.assertEquals(ResourceState.LOCALIZED, lr1.getState());
    LocalizedResource rsrcafter = tracker.iterator().next();
    // assertNotSame replaces the hand-rolled if/Assert.fail identity check.
    Assert.assertNotSame("Localized resource should not be equal",
        rsrcbefore, rsrcafter);
    tracker.handle(rel11Event);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestResourceLocalizationService

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test @SuppressWarnings("unchecked") public void testRecovery() throws Exception { final String user1="user1"; final String user2="user2"; final ApplicationId appId1=ApplicationId.newInstance(1,1); final ApplicationId appId2=ApplicationId.newInstance(1,2); List localDirs=new ArrayList(); String[] sDirs=new String[4]; for (int i=0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler applicationBus=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); EventHandler localizerBus=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerBus); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); ResourceLocalizationService spyService=createSpyService(dispatcher,dirsHandler,stateStore); try { spyService.init(conf); spyService.start(); final Application app1=mock(Application.class); when(app1.getUser()).thenReturn(user1); when(app1.getAppId()).thenReturn(appId1); final Application app2=mock(Application.class); when(app2.getUser()).thenReturn(user2); when(app2.getAppId()).thenReturn(appId2); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app1)); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app2)); dispatcher.await(); LocalResourcesTracker appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1); LocalResourcesTracker 
privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null); LocalResourcesTracker appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2); LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null); final Container c1=getMockContainer(appId1,1,user1); final Container c2=getMockContainer(appId2,2,user2); Random r=new Random(); long seed=r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final LocalResource privResource1=getPrivateMockedResource(r); final LocalResourceRequest privReq1=new LocalResourceRequest(privResource1); final LocalResource privResource2=getPrivateMockedResource(r); final LocalResourceRequest privReq2=new LocalResourceRequest(privResource2); final LocalResource pubResource1=getPublicMockedResource(r); final LocalResourceRequest pubReq1=new LocalResourceRequest(pubResource1); final LocalResource pubResource2=getPublicMockedResource(r); final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2); final LocalResource appResource1=getAppMockedResource(r); final LocalResourceRequest appReq1=new LocalResourceRequest(appResource1); final LocalResource appResource2=getAppMockedResource(r); final LocalResourceRequest appReq2=new LocalResourceRequest(appResource2); final LocalResource appResource3=getAppMockedResource(r); final LocalResourceRequest appReq3=new LocalResourceRequest(appResource3); Map> req1=new HashMap>(); req1.put(LocalResourceVisibility.PRIVATE,Arrays.asList(new LocalResourceRequest[]{privReq1,privReq2})); req1.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq1)); req1.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq1)); Map> req2=new HashMap>(); req2.put(LocalResourceVisibility.APPLICATION,Arrays.asList(new LocalResourceRequest[]{appReq2,appReq3})); req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2)); 
spyService.handle(new ContainerLocalizationRequestEvent(c1,req1)); spyService.handle(new ContainerLocalizationRequestEvent(c2,req2)); dispatcher.await(); privTracker1.getPathForLocalization(privReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1)); privTracker1.getPathForLocalization(privReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1)); LocalizedResource privLr1=privTracker1.getLocalizedResource(privReq1); LocalizedResource privLr2=privTracker1.getLocalizedResource(privReq2); appTracker1.getPathForLocalization(appReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId1)); LocalizedResource appLr1=appTracker1.getLocalizedResource(appReq1); appTracker2.getPathForLocalization(appReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr2=appTracker2.getLocalizedResource(appReq2); appTracker2.getPathForLocalization(appReq3,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr3=appTracker2.getLocalizedResource(appReq3); pubTracker.getPathForLocalization(pubReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr1=pubTracker.getLocalizedResource(pubReq1); pubTracker.getPathForLocalization(pubReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr2=pubTracker.getLocalizedResource(pubReq2); assertNotNull("Localization not started",privLr1.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq1,privLr1.getLocalPath(),privLr1.getSize() + 5)); assertNotNull("Localization not started",privLr2.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq2,privLr2.getLocalPath(),privLr2.getSize() + 10)); assertNotNull("Localization not started",appLr1.getLocalPath()); appTracker1.handle(new ResourceLocalizedEvent(appReq1,appLr1.getLocalPath(),appLr1.getSize())); assertNotNull("Localization not started",appLr3.getLocalPath()); 
appTracker2.handle(new ResourceLocalizedEvent(appReq3,appLr3.getLocalPath(),appLr3.getSize() + 7)); assertNotNull("Localization not started",pubLr1.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq1,pubLr1.getLocalPath(),pubLr1.getSize() + 1000)); assertNotNull("Localization not started",pubLr2.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq2,pubLr2.getLocalPath(),pubLr2.getSize() + 99999)); dispatcher.await(); assertEquals(ResourceState.LOCALIZED,privLr1.getState()); assertEquals(ResourceState.LOCALIZED,privLr2.getState()); assertEquals(ResourceState.LOCALIZED,appLr1.getState()); assertEquals(ResourceState.DOWNLOADING,appLr2.getState()); assertEquals(ResourceState.LOCALIZED,appLr3.getState()); assertEquals(ResourceState.LOCALIZED,pubLr1.getState()); assertEquals(ResourceState.LOCALIZED,pubLr2.getState()); spyService=createSpyService(dispatcher,dirsHandler,stateStore); spyService.init(conf); spyService.recoverLocalizedResources(stateStore.loadLocalizationState()); dispatcher.await(); appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1); privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null); appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2); pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null); LocalizedResource recoveredRsrc=privTracker1.getLocalizedResource(privReq1); assertEquals(privReq1,recoveredRsrc.getRequest()); assertEquals(privLr1.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(privLr1.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=privTracker1.getLocalizedResource(privReq2); assertEquals(privReq2,recoveredRsrc.getRequest()); assertEquals(privLr2.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(privLr2.getSize(),recoveredRsrc.getSize()); 
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=appTracker1.getLocalizedResource(appReq1); assertEquals(appReq1,recoveredRsrc.getRequest()); assertEquals(appLr1.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(appLr1.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=appTracker2.getLocalizedResource(appReq2); assertNull("in-progress resource should not be present",recoveredRsrc); recoveredRsrc=appTracker2.getLocalizedResource(appReq3); assertEquals(appReq3,recoveredRsrc.getRequest()); assertEquals(appLr3.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(appLr3.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); } finally { dispatcher.stop(); stateStore.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that two localizers attempting to download the same PRIVATE
 * resource are serialized by the resource's semaphore: the first heartbeat
 * wins the download slot, the second gets nothing, and after the first
 * localizer reports failure the resource stays FAILED and is never
 * rescheduled to the second localizer.
 */
@Test(timeout=100000)
@SuppressWarnings("unchecked")
public void testParallelDownloadAttemptsForPrivateResource() throws Exception {
  DrainDispatcher dispatcher1 = null;
  try {
    dispatcher1 = new DrainDispatcher();
    String user = "testuser";
    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
    // Single local dir is sufficient for this scenario.
    List localDirs = new ArrayList();
    String[] sDirs = new String[1];
    for (int i = 0; i < 1; ++i) {
      localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
      sDirs[i] = localDirs.get(i).toString();
    }
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
    LocalDirsHandlerService localDirHandler = new LocalDirsHandlerService();
    localDirHandler.init(conf);
    EventHandler applicationBus = mock(EventHandler.class);
    dispatcher1.register(ApplicationEventType.class, applicationBus);
    EventHandler containerBus = mock(EventHandler.class);
    dispatcher1.register(ContainerEventType.class, containerBus);
    ContainerExecutor exec = mock(ContainerExecutor.class);
    DeletionService delService = mock(DeletionService.class);
    // NOTE(review): dirsHandler is initialized but only localDirHandler is
    // passed to the service below — dirsHandler looks unused; verify.
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    dispatcher1.init(conf);
    dispatcher1.start();
    ResourceLocalizationService rls = new ResourceLocalizationService(
        dispatcher1, exec, delService, localDirHandler,
        new NMNullStateStoreService());
    dispatcher1.register(LocalizationEventType.class, rls);
    rls.init(conf);
    rls.handle(createApplicationLocalizationEvent(user, appId));
    // A single PRIVATE resource that both containers will request.
    LocalResourceRequest req = new LocalResourceRequest(new Path("file:///tmp"),
        123L, LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, "");
    // First container's localizer requests the resource.
    ContainerImpl container1 = createMockContainer(user, 1);
    String localizerId1 = container1.getContainerId().toString();
    rls.getPrivateLocalizers().put(localizerId1,
        rls.new LocalizerRunner(new LocalizerContext(user,
            container1.getContainerId(), null), localizerId1));
    LocalizerRunner localizerRunner1 = rls.getLocalizerRunner(localizerId1);
    dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(
        container1, LocalResourceVisibility.PRIVATE, req));
    Assert.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 1,
        200));
    // Second container's localizer requests the same resource in parallel.
    ContainerImpl container2 = createMockContainer(user, 2);
    String localizerId2 = container2.getContainerId().toString();
    rls.getPrivateLocalizers().put(localizerId2,
        rls.new LocalizerRunner(new LocalizerContext(user,
            container2.getContainerId(), null), localizerId2));
    LocalizerRunner localizerRunner2 = rls.getLocalizerRunner(localizerId2);
    dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(
        container2, LocalResourceVisibility.PRIVATE, req));
    Assert.assertTrue(waitForPrivateDownloadToStart(rls, localizerId2, 1,
        200));
    // One download permit exists before any heartbeat claims it.
    LocalResourcesTracker tracker = rls.getLocalResourcesTracker(
        LocalResourceVisibility.PRIVATE, user, appId);
    LocalizedResource lr = tracker.getLocalizedResource(req);
    Assert.assertEquals(ResourceState.DOWNLOADING, lr.getState());
    Assert.assertEquals(1, lr.sem.availablePermits());
    // First heartbeat: localizer 1 is scheduled the resource and consumes
    // the permit.
    LocalizerHeartbeatResponse response1 =
        rls.heartbeat(createLocalizerStatus(localizerId1));
    Assert.assertEquals(1, localizerRunner1.scheduled.size());
    Assert.assertEquals(req.getResource(),
        response1.getResourceSpecs().get(0).getResource().getResource());
    Assert.assertEquals(0, lr.sem.availablePermits());
    // Second heartbeat: no permit left, so localizer 2 gets nothing.
    LocalizerHeartbeatResponse response2 =
        rls.heartbeat(createLocalizerStatus(localizerId2));
    Assert.assertEquals(0, localizerRunner2.scheduled.size());
    Assert.assertEquals(0, response2.getResourceSpecs().size());
    // Localizer 1 reports failure: resource transitions to FAILED and is
    // descheduled.
    rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1, req));
    Assert.assertTrue(waitForResourceState(lr, rls, req,
        LocalResourceVisibility.PRIVATE, user, appId, ResourceState.FAILED,
        200));
    Assert.assertTrue(lr.getState().equals(ResourceState.FAILED));
    Assert.assertEquals(0, localizerRunner1.scheduled.size());
    // The failed resource must never be handed to localizer 2.
    response2 = rls.heartbeat(createLocalizerStatus(localizerId2));
    Assert.assertEquals(0, localizerRunner2.scheduled.size());
    Assert.assertEquals(0, localizerRunner2.pending.size());
    Assert.assertEquals(0, response2.getResourceSpecs().size());
  } finally {
    if (dispatcher1 != null) {
      dispatcher1.stop();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that concurrent download attempts for the same PUBLIC resource are
 * serialized. A first container's localization request drives the resource to
 * DOWNLOADING (one pending public download, 0 semaphore permits); a second
 * container's request for the same resource must NOT start a second public
 * download. After the download is failed via ResourceFailedLocalizationEvent
 * (state FAILED), a follow-up LocalizerResourceRequestEvent for the failed
 * resource must not trigger a new download either, and the resource's
 * semaphore permit is released (1 available).
 * NOTE(review): relies on DrainDispatcher event ordering, spy()'d service
 * internals, and polling helpers (waitFor*); code left byte-identical.
 * Generic type parameters appear stripped in this dump (e.g. raw List) —
 * presumably List&lt;Path&gt; etc. in the real source; confirm against the repo.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception { DrainDispatcher dispatcher1=null; String user="testuser"; try { List localDirs=new ArrayList(); String[] sDirs=new String[1]; for (int i=0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); EventHandler applicationBus=mock(EventHandler.class); dispatcher1=new DrainDispatcher(); dispatcher1.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher1.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); DeletionService delService=mock(DeletionService.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher1,exec,delService,dirsHandler,new NMNullStateStoreService()); ResourceLocalizationService spyService=spy(rawService); dispatcher1.register(LocalizationEventType.class,spyService); spyService.init(conf); Assert.assertEquals(0,spyService.getPublicLocalizer().pending.size()); LocalResourceRequest req=new LocalResourceRequest(new Path("/tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,""); ApplicationImpl app=mock(ApplicationImpl.class); ApplicationId appId=BuilderUtils.newApplicationId(1,1); when(app.getAppId()).thenReturn(appId); when(app.getUser()).thenReturn(user); dispatcher1.getEventHandler().handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app)); ContainerImpl container1=createMockContainer(user,1); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PUBLIC,req)); 
Assert.assertTrue(waitForResourceState(null,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.DOWNLOADING,200)); Assert.assertTrue(waitForPublicDownloadToStart(spyService,1,200)); LocalizedResource lr=getLocalizedResource(spyService,req,LocalResourceVisibility.PUBLIC,user,null); Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState()); Assert.assertEquals(1,spyService.getPublicLocalizer().pending.size()); Assert.assertEquals(0,lr.sem.availablePermits()); ContainerImpl container2=createMockContainer(user,2); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PUBLIC,req)); Assert.assertFalse(waitForPublicDownloadToStart(spyService,2,100)); ResourceFailedLocalizationEvent locFailedEvent=new ResourceFailedLocalizationEvent(req,new Exception("test").toString()); spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,null).handle(locFailedEvent); Assert.assertTrue(waitForResourceState(lr,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.FAILED,200)); lr.unlock(); spyService.getPublicLocalizer().pending.clear(); LocalizerResourceRequestEvent localizerEvent=new LocalizerResourceRequestEvent(lr,null,mock(LocalizerContext.class),null); dispatcher1.getEventHandler().handle(localizerEvent); Assert.assertFalse(waitForPublicDownloadToStart(spyService,1,100)); Assert.assertEquals(1,lr.sem.availablePermits()); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the destination paths handed out for localized resources: a
 * PRIVATE resource must land under the per-user cache
 * (usercache/&lt;user&gt;/filecache) and an APPLICATION resource under the
 * per-app cache (usercache/&lt;user&gt;/appcache/&lt;appId&gt;/filecache).
 * Drives one container requesting both visibilities, then loops over
 * heartbeat responses until both resource specs have been returned,
 * asserting each spec's destination parent directory.
 * Fix: corrected typo in the unexpected-visibility exception message
 * ("recevied" -> "received"); all other tokens left byte-identical.
 * NOTE(review): generic type parameters appear stripped in this dump
 * (raw List, HashMap) — confirm against the real source file.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception { DrainDispatcher dispatcher1=null; try { dispatcher1=new DrainDispatcher(); String user="testuser"; ApplicationId appId=BuilderUtils.newApplicationId(1,1); List localDirs=new ArrayList(); String[] sDirs=new String[1]; for (int i=0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService(); localDirHandler.init(conf); EventHandler applicationBus=mock(EventHandler.class); dispatcher1.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher1.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); DeletionService delService=mock(DeletionService.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService()); dispatcher1.register(LocalizationEventType.class,rls); rls.init(conf); rls.handle(createApplicationLocalizationEvent(user,appId)); Container container1=createMockContainer(user,1); String localizerId1=container1.getContainerId().toString(); rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1)); LocalResourceRequest reqPriv=new LocalResourceRequest(new Path("file:///tmp1"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,""); List privList=new ArrayList(); privList.add(reqPriv); LocalResourceRequest reqApp=new LocalResourceRequest(new Path("file:///tmp2"),123L,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,""); List appList=new ArrayList(); 
appList.add(reqApp); Map> rsrcs=new HashMap>(); rsrcs.put(LocalResourceVisibility.APPLICATION,appList); rsrcs.put(LocalResourceVisibility.PRIVATE,privList); dispatcher1.getEventHandler().handle(new ContainerLocalizationRequestEvent(container1,rsrcs)); Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,2,500)); String userCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.FILECACHE)); String userAppCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.APPCACHE,appId.toString(),ContainerLocalizer.FILECACHE)); int returnedResources=0; boolean appRsrc=false, privRsrc=false; while (returnedResources < 2) { LocalizerHeartbeatResponse response=rls.heartbeat(createLocalizerStatus(localizerId1)); for ( ResourceLocalizationSpec resourceSpec : response.getResourceSpecs()) { returnedResources++; Path destinationDirectory=new Path(resourceSpec.getDestinationDirectory().getFile()); if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) { appRsrc=true; Assert.assertEquals(userAppCachePath,destinationDirectory.getParent().toUri().toString()); } else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) { privRsrc=true; Assert.assertEquals(userCachePath,destinationDirectory.getParent().toUri().toString()); } else { throw new Exception("Unexpected resource received."); } } } Assert.assertTrue(appRsrc && privRsrc); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the localizer heartbeat protocol end to end with mocked
 * executor/filesystem: after APPLICATION_INITED is verified, two distinct
 * PRIVATE resources are requested for one container. Successive heartbeats
 * must hand out one resource spec each (LIVE), then an empty LIVE response,
 * then DIE once both resources report FETCH_SUCCESS. Also checks the
 * destination paths end with the expected usercache/filecache suffixes
 * (".../10" and ".../0/11" — the second path reflects the
 * NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY=37 sub-directory rollover),
 * verifies two RESOURCE_LOCALIZED container events, and that the
 * localization token file is deleted.
 * NOTE(review): heavily order-dependent Mockito stubbing
 * (when(stat.getResources()) chained thenReturn calls) — code left
 * byte-identical. Random seed is printed so failures are reproducible.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalizationHeartbeat() throws Exception { List localDirs=new ArrayList(); String[] sDirs=new String[1]; localDirs.add(lfs.makeQualified(new Path(basedir,0 + ""))); sDirs[0]=localDirs.get(0).toString(); conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37"); DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler applicationBus=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); DeletionService delServiceReal=new DeletionService(exec); DeletionService delService=spy(delServiceReal); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService()); ResourceLocalizationService spyService=spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class)); try { spyService.init(conf); spyService.start(); final Application app=mock(Application.class); final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3); when(app.getUser()).thenReturn("user0"); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app)); ArgumentMatcher matchesAppInit=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ApplicationEvent evt=(ApplicationEvent)o; return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID(); } } ; dispatcher.await(); 
verify(applicationBus).handle(argThat(matchesAppInit)); Random r=new Random(); long seed=r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final Container c=getMockContainer(appId,42,"user0"); FSDataOutputStream out=new FSDataOutputStream(new DataOutputBuffer(),null); doReturn(out).when(spylfs).createInternal(isA(Path.class),isA(EnumSet.class),isA(FsPermission.class),anyInt(),anyShort(),anyLong(),isA(Progressable.class),isA(ChecksumOpt.class),anyBoolean()); final LocalResource resource1=getPrivateMockedResource(r); LocalResource resource2=null; do { resource2=getPrivateMockedResource(r); } while (resource2 == null || resource2.equals(resource1)); final LocalResourceRequest req1=new LocalResourceRequest(resource1); final LocalResourceRequest req2=new LocalResourceRequest(resource2); Map> rsrcs=new HashMap>(); List privateResourceList=new ArrayList(); privateResourceList.add(req1); privateResourceList.add(req2); rsrcs.put(LocalResourceVisibility.PRIVATE,privateResourceList); spyService.handle(new ContainerLocalizationRequestEvent(c,rsrcs)); Thread.sleep(1000); dispatcher.await(); String appStr=ConverterUtils.toString(appId); String ctnrStr=c.getContainerId().toString(); ArgumentCaptor tokenPathCaptor=ArgumentCaptor.forClass(Path.class); verify(exec).startLocalizer(tokenPathCaptor.capture(),isA(InetSocketAddress.class),eq("user0"),eq(appStr),eq(ctnrStr),isA(List.class),isA(List.class)); Path localizationTokenPath=tokenPathCaptor.getValue(); LocalResourceStatus rsrcStat1=mock(LocalResourceStatus.class); LocalResourceStatus rsrcStat2=mock(LocalResourceStatus.class); LocalizerStatus stat=mock(LocalizerStatus.class); when(stat.getLocalizerId()).thenReturn(ctnrStr); when(rsrcStat1.getResource()).thenReturn(resource1); when(rsrcStat2.getResource()).thenReturn(resource2); when(rsrcStat1.getLocalSize()).thenReturn(4344L); when(rsrcStat2.getLocalSize()).thenReturn(2342L); URL locPath=getPath("/cache/private/blah"); 
when(rsrcStat1.getLocalPath()).thenReturn(locPath); when(rsrcStat2.getLocalPath()).thenReturn(locPath); when(rsrcStat1.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(rsrcStat2.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(stat.getResources()).thenReturn(Collections.emptyList()).thenReturn(Collections.singletonList(rsrcStat1)).thenReturn(Collections.singletonList(rsrcStat2)).thenReturn(Collections.emptyList()); String localPath=Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR+ "user0"+ Path.SEPARATOR+ ContainerLocalizer.FILECACHE; LocalizerHeartbeatResponse response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(1,response.getResourceSpecs().size()); assertEquals(req1,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource())); URL localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory(); assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "10")); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(1,response.getResourceSpecs().size()); assertEquals(req2,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource())); localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory(); assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "0"+ Path.SEPARATOR+ "11")); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(0,response.getResourceSpecs().size()); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.DIE,response.getLocalizerAction()); dispatcher.await(); ArgumentMatcher matchesContainerLoc=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ContainerEvent evt=(ContainerEvent)o; return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID(); } } ; 
verify(containerBus,times(2)).handle(argThat(matchesContainerLoc)); verify(delService).delete((String)isNull(),eq(localizationTokenPath)); } finally { spyService.stop(); dispatcher.stop(); delService.stop(); } }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.TestLogAggregationService

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that after an application finishes and its logs are aggregated,
 * the local per-container log files (stdout/stderr/syslog) and the local app
 * log directory are deleted, the aggregated log file exists at the remote
 * root log dir, the service verifies closeFileSystems and the expected
 * DeletionService.delete call, and APPLICATION_LOG_HANDLING_INITED /
 * _FINISHED events are emitted.
 * NOTE(review): depends on LogAggregationService.stop() draining its
 * aggregators before assertions run — code left byte-identical.
 */
@Test @SuppressWarnings("unchecked") public void testLocalFileDeletionAfterUpload() throws Exception { this.delSrvc=new DeletionService(createContainerExecutor()); delSrvc=spy(delSrvc); this.delSrvc.init(conf); this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler)); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1)); app1LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(application1,1); ContainerId container11=BuilderUtils.newContainerId(appAttemptId,1); writeContainerLogs(app1LogDir,container11); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class)); verify(delSrvc).delete(eq(user),eq((Path)null),eq(new Path(app1LogDir.getAbsolutePath()))); delSrvc.stop(); String containerIdStr=ConverterUtils.toString(container11); File containerLogDir=new File(app1LogDir,containerIdStr); for ( String fileType : new String[]{"stdout","stderr","syslog"}) { File f=new File(containerLogDir,fileType); Assert.assertFalse("check " + f,f.exists()); } 
Assert.assertFalse(app1LogDir.exists()); Path logFilePath=logAggregationService.getRemoteNodeLogFileForApp(application1,this.user); Assert.assertTrue("Log file [" + logFilePath + "] not found",new File(logFilePath.toUri().getPath()).exists()); dispatcher.await(); ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)}; checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID"); dispatcher.stop(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that when an application starts and finishes without any
 * container having run on the node, no aggregated log file is produced at
 * the remote log location, the service ends with zero aggregators, and the
 * APPLICATION_LOG_HANDLING_INITED / _FINISHED events are still emitted.
 */
@Test @SuppressWarnings("unchecked") public void testNoContainerOnNode() throws Exception { this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1)); app1LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); Assert.assertFalse(new File(logAggregationService.getRemoteNodeLogFileForApp(application1,this.user).toUri().getPath()).exists()); dispatcher.await(); ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)}; checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID"); dispatcher.stop(); }

Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.TestContainersMonitor

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a container exceeding its virtual-memory limit is killed:
 * launches a shell script (which records its PID in a start file and sleeps)
 * in a container whose Resource is built from 8*1024*1024, waits for the
 * container to reach COMPLETE, then asserts exit status
 * KILLED_EXCEEDED_VMEM, a diagnostics message matching the expected
 * "running beyond virtual memory limits" pattern (including the process-tree
 * dump header), and that the script process is no longer alive.
 * Skips silently when ProcfsBasedProcessTree is unavailable (non-procfs
 * platforms).
 * NOTE(review): in this dump the expectedMsgPattern string literal is split
 * across physical lines mid-literal — extraction artifact, not valid Java as
 * shown; code left byte-identical.
 */
@Test public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException { if (!ProcfsBasedProcessTree.isAvailable()) { return; } containerManager.start(); File scriptFile=new File(tmpDir,"scriptFile.sh"); PrintWriter fileWriter=new PrintWriter(scriptFile); File processStartFile=new File(tmpDir,"start_file.txt").getAbsoluteFile(); fileWriter.write("\numask 0"); fileWriter.write("\necho Hello World! > " + processStartFile); fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nsleep 15"); fileWriter.close(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); ApplicationId appId=ApplicationId.newInstance(0,0); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cId=ContainerId.newInstance(appAttemptId,0); int port=12345; URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List commands=new ArrayList(); commands.add("/bin/bash"); commands.add(scriptFile.getAbsolutePath()); containerLaunchContext.setCommands(commands); Resource r=BuilderUtils.newResource(8 * 1024 * 1024,1); ContainerTokenIdentifier containerIdentifier=new ContainerTokenIdentifier(cId,context.getNodeId().toString(),user,r,System.currentTimeMillis() + 120000,123,DUMMY_RM_IDENTIFIER,Priority.newInstance(0),0); Token 
containerToken=BuilderUtils.newContainerToken(context.getNodeId(),containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier),containerIdentifier); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,containerToken); List list=new ArrayList(); list.add(scRequest); StartContainersRequest allRequests=StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs=0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists()); BufferedReader reader=new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals("Hello World!",reader.readLine()); String pid=reader.readLine().trim(); Assert.assertEquals(null,reader.readLine()); BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE,60); List containerIds=new ArrayList(); containerIds.add(cId); GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,containerStatus.getExitStatus()); String expectedMsgPattern="Container \\[pid=" + pid + ",containerID="+ cId+ "\\] is running beyond virtual memory limits. Current usage: "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. 
"+ "Killing container.\nDump of the process-tree for "+ cId+ " :\n"; Pattern pat=Pattern.compile(expectedMsgPattern); Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: "+ containerStatus.getDiagnostics(),true,pat.matcher(containerStatus.getDiagnostics()).find()); Assert.assertFalse("Process is still alive!",exec.signalContainer(user,pid,Signal.NULL)); }

Class: org.apache.hadoop.yarn.server.nodemanager.recovery.TestNMLeveldbStateStoreService

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies state-store removal of localized resources across all three
 * visibilities: an APPLICATION resource removed after completion and again
 * while in-progress (store is empty after restart both times); of two PUBLIC
 * resources (null user/app) only the non-removed one survives a restart; a
 * PRIVATE in-progress resource removed before completion leaves no user
 * resources. Each phase restarts the store and inspects the recovered
 * localization state.
 */
@Test public void testRemoveLocalizedResource() throws IOException { String user="somebody"; ApplicationId appId=ApplicationId.newInstance(1,1); Path appRsrcPath=new Path("hdfs://some/app/resource"); LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L); LocalResourceProto appRsrcProto=rsrcPb.getProto(); Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc"); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build(); stateStore.finishResourceLocalization(user,appId,appLocalizedProto); stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath); restartStateStore(); verifyEmptyState(); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath); restartStateStore(); verifyEmptyState(); Path pubRsrcPath1=new Path("hdfs://some/public/resource1"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto1=rsrcPb.getProto(); Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1"); stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1); LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(789L).build(); stateStore.finishResourceLocalization(null,null,pubLocalizedProto1); Path pubRsrcPath2=new Path("hdfs://some/public/resource2"); 
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto2=rsrcPb.getProto(); Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2"); stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2); LocalizedResourceProto pubLocalizedProto2=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto2).setLocalPath(pubRsrcLocalPath2.toString()).setSize(7654321L).build(); stateStore.finishResourceLocalization(null,null,pubLocalizedProto2); stateStore.removeLocalizedResource(null,null,pubRsrcLocalPath2); Path privRsrcPath=new Path("hdfs://some/private/resource"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*"); LocalResourceProto privRsrcProto=rsrcPb.getProto(); Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc"); stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath); stateStore.removeLocalizedResource(user,null,privRsrcLocalPath); restartStateStore(); RecoveredLocalizationState state=stateStore.loadLocalizationState(); LocalResourceTrackerState pubts=state.getPublicTrackerState(); assertTrue(pubts.getInProgressResources().isEmpty()); assertEquals(1,pubts.getLocalizedResources().size()); assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next()); Map userResources=state.getUserResources(); assertTrue(userResources.isEmpty()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies application persistence in the NM leveldb state store: stored
 * application protos and finished-application markers survive a store
 * restart, accumulate correctly across two applications, and
 * removeApplication deletes both the application proto and its
 * finished-application entry while leaving the other application intact.
 */
@Test public void testApplicationStorage() throws IOException { RecoveredApplicationsState state=stateStore.loadApplicationsState(); assertTrue(state.getApplications().isEmpty()); assertTrue(state.getFinishedApplications().isEmpty()); final ApplicationId appId1=ApplicationId.newInstance(1234,1); ContainerManagerApplicationProto.Builder builder=ContainerManagerApplicationProto.newBuilder(); builder.setId(((ApplicationIdPBImpl)appId1).getProto()); builder.setUser("user1"); ContainerManagerApplicationProto appProto1=builder.build(); stateStore.storeApplication(appId1,appProto1); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(1,state.getApplications().size()); assertEquals(appProto1,state.getApplications().get(0)); assertTrue(state.getFinishedApplications().isEmpty()); stateStore.storeFinishedApplication(appId1); final ApplicationId appId2=ApplicationId.newInstance(1234,2); builder=ContainerManagerApplicationProto.newBuilder(); builder.setId(((ApplicationIdPBImpl)appId2).getProto()); builder.setUser("user2"); ContainerManagerApplicationProto appProto2=builder.build(); stateStore.storeApplication(appId2,appProto2); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(2,state.getApplications().size()); assertTrue(state.getApplications().contains(appProto1)); assertTrue(state.getApplications().contains(appProto2)); assertEquals(1,state.getFinishedApplications().size()); assertEquals(appId1,state.getFinishedApplications().get(0)); stateStore.storeFinishedApplication(appId2); stateStore.removeApplication(appId2); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(1,state.getApplications().size()); assertEquals(appProto1,state.getApplications().get(0)); assertEquals(1,state.getFinishedApplications().size()); assertEquals(appId1,state.getFinishedApplications().get(0)); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies NM token master-key persistence across state-store restarts:
 * current key, previous key, and per-application-attempt keys are each
 * stored, recovered after restart, and updatable (key rotation: previous
 * takes the old current, a new current is generated); removing an attempt
 * key leaves the remaining attempt keys recoverable.
 */
@Test public void testNMTokenStorage() throws IOException { RecoveredNMTokensState state=stateStore.loadNMTokensState(); assertNull(state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); NMTokenSecretManagerForTest secretMgr=new NMTokenSecretManagerForTest(); MasterKey currentKey=secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); MasterKey prevKey=secretMgr.generateKey(); stateStore.storeNMTokenPreviousMasterKey(prevKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1); MasterKey attemptKey1=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt1,attemptKey1); ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,3),4); MasterKey attemptKey2=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); Map loadedAppKeys=state.getApplicationMasterKeys(); assertEquals(2,loadedAppKeys.size()); assertEquals(attemptKey1,loadedAppKeys.get(attempt1)); assertEquals(attemptKey2,loadedAppKeys.get(attempt2)); ApplicationAttemptId attempt3=ApplicationAttemptId.newInstance(ApplicationId.newInstance(5,6),7); MasterKey attemptKey3=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt3,attemptKey3); 
stateStore.removeNMTokenApplicationMasterKey(attempt1); attemptKey2=prevKey; stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2); prevKey=currentKey; stateStore.storeNMTokenPreviousMasterKey(prevKey); currentKey=secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); loadedAppKeys=state.getApplicationMasterKeys(); assertEquals(2,loadedAppKeys.size()); assertNull(loadedAppKeys.get(attempt1)); assertEquals(attemptKey2,loadedAppKeys.get(attempt2)); assertEquals(attemptKey3,loadedAppKeys.get(attempt3)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the full container lifecycle as seen through the state store:
 * REQUESTED -> LAUNCHED -> killed -> COMPLETED -> removed, checking after
 * each transition (and a simulated restart) that status, exit code, kill
 * flag, start request and accumulated diagnostics are recovered intact.
 *
 * <p>Fix: replaced {@code assertEquals(false, rcs.getKilled())} with the
 * idiomatic {@code assertFalse(rcs.getKilled())}, matching the assertion
 * style used elsewhere in this file.
 */
@Test public void testContainerStorage() throws IOException {
  // A fresh store must have no recovered containers.
  List recoveredContainers=stateStore.loadContainersState();
  assertTrue(recoveredContainers.isEmpty());
  // Build a fully-populated StartContainerRequest so recovery must
  // round-trip every field of the launch context and token.
  ApplicationId appId=ApplicationId.newInstance(1234,3);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,4);
  ContainerId containerId=ContainerId.newInstance(appAttemptId,5);
  LocalResource lrsrc=LocalResource.newInstance(URL.newInstance("hdfs","somehost",12345,"/some/path/to/rsrc"),LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,123L,1234567890L);
  Map localResources=new HashMap();
  localResources.put("rsrc",lrsrc);
  Map env=new HashMap();
  env.put("somevar","someval");
  List containerCmds=new ArrayList();
  containerCmds.add("somecmd");
  containerCmds.add("somearg");
  Map serviceData=new HashMap();
  serviceData.put("someservice",ByteBuffer.wrap(new byte[]{0x1,0x2,0x3}));
  ByteBuffer containerTokens=ByteBuffer.wrap(new byte[]{0x7,0x8,0x9,0xa});
  Map acls=new HashMap();
  acls.put(ApplicationAccessType.VIEW_APP,"viewuser");
  acls.put(ApplicationAccessType.MODIFY_APP,"moduser");
  ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,env,containerCmds,serviceData,containerTokens,acls);
  Resource containerRsrc=Resource.newInstance(1357,3);
  ContainerTokenIdentifier containerTokenId=new ContainerTokenIdentifier(containerId,"host","user",containerRsrc,9876543210L,42,2468,Priority.newInstance(7),13579);
  Token containerToken=Token.newInstance(containerTokenId.getBytes(),ContainerTokenIdentifier.KIND.toString(),"password".getBytes(),"tokenservice");
  StartContainerRequest containerReq=StartContainerRequest.newInstance(clc,containerToken);
  // Store the request; after restart the container is REQUESTED with no
  // exit code, not killed, and no diagnostics.
  stateStore.storeContainer(containerId,containerReq);
  restartStateStore();
  recoveredContainers=stateStore.loadContainersState();
  assertEquals(1,recoveredContainers.size());
  RecoveredContainerState rcs=recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.REQUESTED,rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
  assertFalse(rcs.getKilled());
  assertEquals(containerReq,rcs.getStartRequest());
  assertTrue(rcs.getDiagnostics().isEmpty());
  // Mark launched and add diagnostics; both must survive a restart.
  StringBuilder diags=new StringBuilder();
  stateStore.storeContainerLaunched(containerId);
  diags.append("some diags for container");
  stateStore.storeContainerDiagnostics(containerId,diags);
  restartStateStore();
  recoveredContainers=stateStore.loadContainersState();
  assertEquals(1,recoveredContainers.size());
  rcs=recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
  assertFalse(rcs.getKilled());
  assertEquals(containerReq,rcs.getStartRequest());
  assertEquals(diags.toString(),rcs.getDiagnostics());
  // Kill the container; status stays LAUNCHED but the kill flag is set.
  diags.append("some more diags for container");
  stateStore.storeContainerDiagnostics(containerId,diags);
  stateStore.storeContainerKilled(containerId);
  restartStateStore();
  recoveredContainers=stateStore.loadContainersState();
  assertEquals(1,recoveredContainers.size());
  rcs=recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
  assertTrue(rcs.getKilled());
  assertEquals(containerReq,rcs.getStartRequest());
  assertEquals(diags.toString(),rcs.getDiagnostics());
  // Complete with exit code 21; COMPLETED status and code must persist.
  diags.append("some final diags");
  stateStore.storeContainerDiagnostics(containerId,diags);
  stateStore.storeContainerCompleted(containerId,21);
  restartStateStore();
  recoveredContainers=stateStore.loadContainersState();
  assertEquals(1,recoveredContainers.size());
  rcs=recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.COMPLETED,rcs.getStatus());
  assertEquals(21,rcs.getExitCode());
  assertTrue(rcs.getKilled());
  assertEquals(containerReq,rcs.getStartRequest());
  assertEquals(diags.toString(),rcs.getDiagnostics());
  // Removing the container leaves the store empty again.
  stateStore.removeContainer(containerId);
  restartStateStore();
  recoveredContainers=stateStore.loadContainersState();
  assertTrue(recoveredContainers.isEmpty());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testCheckVersion() throws IOException { Version defaultVersion=stateStore.getCurrentVersion(); Assert.assertEquals(defaultVersion,stateStore.loadVersion()); Version compatibleVersion=Version.newInstance(defaultVersion.getMajorVersion(),defaultVersion.getMinorVersion() + 2); stateStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion,stateStore.loadVersion()); restartStateStore(); Assert.assertEquals(defaultVersion,stateStore.loadVersion()); Version incompatibleVersion=Version.newInstance(defaultVersion.getMajorVersion() + 1,defaultVersion.getMinorVersion()); stateStore.storeVersion(incompatibleVersion); try { restartStateStore(); Assert.fail("Incompatible version, should expect fail here."); } catch ( ServiceStateException e) { Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for NM state:")); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDeletionTaskStorage() throws IOException { RecoveredDeletionServiceState state=stateStore.loadDeletionServiceState(); assertTrue(state.getTasks().isEmpty()); DeletionServiceDeleteTaskProto proto=DeletionServiceDeleteTaskProto.newBuilder().setId(7).setUser("someuser").setSubdir("some/subdir").addBasedirs("some/dir/path").addBasedirs("some/other/dir/path").setDeletionTime(123456L).addSuccessorIds(8).addSuccessorIds(9).build(); stateStore.storeDeletionTask(proto.getId(),proto); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(1,state.getTasks().size()); assertEquals(proto,state.getTasks().get(0)); DeletionServiceDeleteTaskProto proto2=DeletionServiceDeleteTaskProto.newBuilder().setId(8).setUser("user2").setSubdir("subdir2").setDeletionTime(789L).build(); stateStore.storeDeletionTask(proto2.getId(),proto2); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(2,state.getTasks().size()); assertTrue(state.getTasks().contains(proto)); assertTrue(state.getTasks().contains(proto2)); stateStore.removeDeletionTask(proto2.getId()); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(1,state.getTasks().size()); assertEquals(proto,state.getTasks().get(0)); stateStore.removeDeletionTask(proto.getId()); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertTrue(state.getTasks().isEmpty()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that in-progress localizations are persisted per tracker:
 * app-scoped (user + appId), private (user, null appId) and public
 * (null user, null appId) resources each land in the correct tracker
 * state and are recovered after a restart, while no resource is reported
 * as fully localized. NOTE(review): the raw {@code Map} types here look
 * like generics stripped by extraction — confirm against the original
 * source before compiling.
 */
@Test public void testStartResourceLocalization() throws IOException {
  String user="somebody";
  ApplicationId appId=ApplicationId.newInstance(1,1);
  // Start an application-scoped localization.
  Path appRsrcPath=new Path("hdfs://some/app/resource");
  LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
  LocalResourceProto appRsrcProto=rsrcPb.getProto();
  Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
  stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
  restartStateStore();
  // Only the app tracker of this user should show one in-progress entry.
  RecoveredLocalizationState state=stateStore.loadLocalizationState();
  LocalResourceTrackerState pubts=state.getPublicTrackerState();
  assertTrue(pubts.getLocalizedResources().isEmpty());
  assertTrue(pubts.getInProgressResources().isEmpty());
  Map userResources=state.getUserResources();
  assertEquals(1,userResources.size());
  RecoveredUserResources rur=userResources.get(user);
  LocalResourceTrackerState privts=rur.getPrivateTrackerState();
  assertNotNull(privts);
  assertTrue(privts.getLocalizedResources().isEmpty());
  assertTrue(privts.getInProgressResources().isEmpty());
  assertEquals(1,rur.getAppTrackerStates().size());
  LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
  assertNotNull(appts);
  assertTrue(appts.getLocalizedResources().isEmpty());
  assertEquals(1,appts.getInProgressResources().size());
  assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
  // Start two public localizations (null user/appId => public tracker).
  Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
  LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
  Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
  stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
  Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
  LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
  Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
  stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
  // Start a private localization (user set, null appId => private tracker).
  Path privRsrcPath=new Path("hdfs://some/private/resource");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
  LocalResourceProto privRsrcProto=rsrcPb.getProto();
  Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
  stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
  restartStateStore();
  // After recovery each tracker holds exactly its own in-progress entries.
  state=stateStore.loadLocalizationState();
  pubts=state.getPublicTrackerState();
  assertTrue(pubts.getLocalizedResources().isEmpty());
  assertEquals(2,pubts.getInProgressResources().size());
  assertEquals(pubRsrcLocalPath1,pubts.getInProgressResources().get(pubRsrcProto1));
  assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
  userResources=state.getUserResources();
  assertEquals(1,userResources.size());
  rur=userResources.get(user);
  privts=rur.getPrivateTrackerState();
  assertNotNull(privts);
  assertTrue(privts.getLocalizedResources().isEmpty());
  assertEquals(1,privts.getInProgressResources().size());
  assertEquals(privRsrcLocalPath,privts.getInProgressResources().get(privRsrcProto));
  assertEquals(1,rur.getAppTrackerStates().size());
  appts=rur.getAppTrackerStates().get(appId);
  assertNotNull(appts);
  assertTrue(appts.getLocalizedResources().isEmpty());
  assertEquals(1,appts.getInProgressResources().size());
  assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that completed localizations move from the in-progress set to
 * the localized set of the correct tracker (app-scoped, public, private),
 * and that a resource started but not finished (public resource #2) stays
 * in-progress — all checked after state-store restarts.
 */
@Test public void testFinishResourceLocalization() throws IOException {
  String user="somebody";
  ApplicationId appId=ApplicationId.newInstance(1,1);
  // Start AND finish an application-scoped localization.
  Path appRsrcPath=new Path("hdfs://some/app/resource");
  LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
  LocalResourceProto appRsrcProto=rsrcPb.getProto();
  Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
  stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
  LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
  stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
  restartStateStore();
  // The app tracker now has one localized entry and nothing in progress.
  RecoveredLocalizationState state=stateStore.loadLocalizationState();
  LocalResourceTrackerState pubts=state.getPublicTrackerState();
  assertTrue(pubts.getLocalizedResources().isEmpty());
  assertTrue(pubts.getInProgressResources().isEmpty());
  Map userResources=state.getUserResources();
  assertEquals(1,userResources.size());
  RecoveredUserResources rur=userResources.get(user);
  LocalResourceTrackerState privts=rur.getPrivateTrackerState();
  assertNotNull(privts);
  assertTrue(privts.getLocalizedResources().isEmpty());
  assertTrue(privts.getInProgressResources().isEmpty());
  assertEquals(1,rur.getAppTrackerStates().size());
  LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
  assertNotNull(appts);
  assertTrue(appts.getInProgressResources().isEmpty());
  assertEquals(1,appts.getLocalizedResources().size());
  assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
  // Start two public localizations; only the first will be finished.
  Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
  LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
  Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
  stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
  Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
  LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
  Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
  stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
  // Start a private localization for the same user.
  Path privRsrcPath=new Path("hdfs://some/private/resource");
  rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
  LocalResourceProto privRsrcProto=rsrcPb.getProto();
  Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
  stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
  // Finish public #1 and the private resource, but NOT public #2.
  LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(pubRsrcProto1.getSize()).build();
  stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
  LocalizedResourceProto privLocalizedProto=LocalizedResourceProto.newBuilder().setResource(privRsrcProto).setLocalPath(privRsrcLocalPath.toString()).setSize(privRsrcProto.getSize()).build();
  stateStore.finishResourceLocalization(user,null,privLocalizedProto);
  restartStateStore();
  // Public tracker: one localized (pub1), one still in progress (pub2).
  state=stateStore.loadLocalizationState();
  pubts=state.getPublicTrackerState();
  assertEquals(1,pubts.getLocalizedResources().size());
  assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
  assertEquals(1,pubts.getInProgressResources().size());
  assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
  // Private tracker: the finished private resource, nothing in progress.
  userResources=state.getUserResources();
  assertEquals(1,userResources.size());
  rur=userResources.get(user);
  privts=rur.getPrivateTrackerState();
  assertNotNull(privts);
  assertEquals(1,privts.getLocalizedResources().size());
  assertEquals(privLocalizedProto,privts.getLocalizedResources().iterator().next());
  assertTrue(privts.getInProgressResources().isEmpty());
  // App tracker: unchanged from the first phase.
  assertEquals(1,rur.getAppTrackerStates().size());
  appts=rur.getAppTrackerStates().get(appId);
  assertNotNull(appts);
  assertTrue(appts.getInProgressResources().isEmpty());
  assertEquals(1,appts.getLocalizedResources().size());
  assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies container-token state persistence: current/previous master keys
 * and the map of active container tokens (container id -> expiration time)
 * survive restarts, and additions, removals, expiration updates and master
 * key rolls are reflected after recovery.
 */
@Test public void testContainerTokenStorage() throws IOException {
  // Fresh store: no keys, no active tokens.
  RecoveredContainerTokensState state=stateStore.loadContainerTokensState();
  assertNull(state.getCurrentMasterKey());
  assertNull(state.getPreviousMasterKey());
  assertTrue(state.getActiveTokens().isEmpty());
  // Store a current master key; verify it is recovered.
  ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(new YarnConfiguration());
  MasterKey currentKey=keygen.generateKey();
  stateStore.storeContainerTokenCurrentMasterKey(currentKey);
  restartStateStore();
  state=stateStore.loadContainerTokensState();
  assertEquals(currentKey,state.getCurrentMasterKey());
  assertNull(state.getPreviousMasterKey());
  assertTrue(state.getActiveTokens().isEmpty());
  // Store a previous master key as well.
  MasterKey prevKey=keygen.generateKey();
  stateStore.storeContainerTokenPreviousMasterKey(prevKey);
  restartStateStore();
  state=stateStore.loadContainerTokensState();
  assertEquals(currentKey,state.getCurrentMasterKey());
  assertEquals(prevKey,state.getPreviousMasterKey());
  assertTrue(state.getActiveTokens().isEmpty());
  // Store two active tokens with distinct expiration times.
  ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
  Long expTime1=1234567890L;
  ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
  Long expTime2=9876543210L;
  stateStore.storeContainerToken(cid1,expTime1);
  stateStore.storeContainerToken(cid2,expTime2);
  restartStateStore();
  state=stateStore.loadContainerTokensState();
  assertEquals(currentKey,state.getCurrentMasterKey());
  assertEquals(prevKey,state.getPreviousMasterKey());
  Map loadedActiveTokens=state.getActiveTokens();
  assertEquals(2,loadedActiveTokens.size());
  assertEquals(expTime1,loadedActiveTokens.get(cid1));
  assertEquals(expTime2,loadedActiveTokens.get(cid2));
  // Mutate: add cid3, remove cid1, extend cid2, and roll the master keys.
  ContainerId cid3=BuilderUtils.newContainerId(3,3,3,3);
  Long expTime3=135798642L;
  stateStore.storeContainerToken(cid3,expTime3);
  stateStore.removeContainerToken(cid1);
  expTime2+=246897531L;
  stateStore.storeContainerToken(cid2,expTime2);
  prevKey=currentKey;
  stateStore.storeContainerTokenPreviousMasterKey(prevKey);
  currentKey=keygen.generateKey();
  stateStore.storeContainerTokenCurrentMasterKey(currentKey);
  restartStateStore();
  // After recovery: cid1 gone, cid2 updated, cid3 present, keys rolled.
  state=stateStore.loadContainerTokensState();
  assertEquals(currentKey,state.getCurrentMasterKey());
  assertEquals(prevKey,state.getPreviousMasterKey());
  loadedActiveTokens=state.getActiveTokens();
  assertEquals(2,loadedActiveTokens.size());
  assertNull(loadedActiveTokens.get(cid1));
  assertEquals(expTime2,loadedActiveTokens.get(cid2));
  assertEquals(expTime3,loadedActiveTokens.get(cid3));
}

Class: org.apache.hadoop.yarn.server.nodemanager.security.TestNMContainerTokenSecretManager

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies NMContainerTokenSecretManager recovery from a memory state
 * store: issued tokens remain valid across a recovered manager instance,
 * a successfully started container's token stops being a valid start
 * request, and after the master key rolls twice the old tokens can no
 * longer yield a password (both the original and the rolled-to-previous
 * key have been discarded).
 */
@Test public void testRecovery() throws IOException {
  YarnConfiguration conf=new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
  final NodeId nodeId=NodeId.newInstance("somehost",1234);
  final ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
  final ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
  ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(conf);
  NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
  stateStore.init(conf);
  stateStore.start();
  // Issue two container tokens under the initial master key.
  NMContainerTokenSecretManager secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
  secretMgr.setNodeId(nodeId);
  MasterKey currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  ContainerTokenIdentifier tokenId1=createContainerTokenId(cid1,nodeId,"user1",secretMgr);
  ContainerTokenIdentifier tokenId2=createContainerTokenId(cid2,nodeId,"user2",secretMgr);
  assertNotNull(secretMgr.retrievePassword(tokenId1));
  assertNotNull(secretMgr.retrievePassword(tokenId2));
  // Recover into a fresh manager: key and both tokens must be restored.
  secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
  secretMgr.setNodeId(nodeId);
  secretMgr.recover();
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
  assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
  assertNotNull(secretMgr.retrievePassword(tokenId1));
  assertNotNull(secretMgr.retrievePassword(tokenId2));
  // Start container 2 and roll the key; after recovery token 2 is no
  // longer a valid *start* request, but both passwords still resolve
  // (old key is retained as the previous key).
  secretMgr.startContainerSuccessful(tokenId2);
  currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
  secretMgr.setNodeId(nodeId);
  secretMgr.recover();
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
  assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
  assertNotNull(secretMgr.retrievePassword(tokenId1));
  assertNotNull(secretMgr.retrievePassword(tokenId2));
  // Roll the key a second time: the original issuing key is dropped, so
  // password retrieval must now fail for both tokens.
  currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
  secretMgr.setNodeId(nodeId);
  secretMgr.recover();
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
  assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
  try {
    secretMgr.retrievePassword(tokenId1);
    fail("token should not be valid");
  }
 catch (  InvalidToken e) {
    // expected: token signed with a discarded key
  }
  try {
    secretMgr.retrievePassword(tokenId2);
    fail("token should not be valid");
  }
 catch (  InvalidToken e) {
    // expected: token signed with a discarded key
  }
  stateStore.close();
}

Class: org.apache.hadoop.yarn.server.nodemanager.security.TestNMTokenSecretManagerInNM

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies NMTokenSecretManagerInNM recovery from a memory state store:
 * per-attempt keys and token passwords survive recovery into a fresh
 * manager, finishing an application drops that attempt's key, and its
 * token's password becomes irretrievable once the master key has rolled
 * past the key the token was issued under.
 */
@Test public void testRecovery() throws IOException {
  YarnConfiguration conf=new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
  final NodeId nodeId=NodeId.newInstance("somehost",1234);
  final ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1);
  final ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,2),2);
  NMTokenKeyGeneratorForTest keygen=new NMTokenKeyGeneratorForTest();
  NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
  stateStore.init(conf);
  stateStore.start();
  // Issue NM tokens for two attempts and register container starts.
  NMTokenSecretManagerInNM secretMgr=new NMTokenSecretManagerInNM(stateStore);
  secretMgr.setNodeId(nodeId);
  MasterKey currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  NMTokenIdentifier attemptToken1=getNMTokenId(secretMgr.createNMToken(attempt1,nodeId,"user1"));
  NMTokenIdentifier attemptToken2=getNMTokenId(secretMgr.createNMToken(attempt2,nodeId,"user2"));
  secretMgr.appAttemptStartContainer(attemptToken1);
  secretMgr.appAttemptStartContainer(attemptToken2);
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
  assertNotNull(secretMgr.retrievePassword(attemptToken1));
  assertNotNull(secretMgr.retrievePassword(attemptToken2));
  // Recover into a fresh manager: keys and passwords must be restored.
  secretMgr=new NMTokenSecretManagerInNM(stateStore);
  secretMgr.recover();
  secretMgr.setNodeId(nodeId);
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
  assertNotNull(secretMgr.retrievePassword(attemptToken1));
  assertNotNull(secretMgr.retrievePassword(attemptToken2));
  // Roll the key and finish app 1: its per-attempt key disappears after
  // recovery, but both passwords still resolve (old key kept as previous).
  currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  secretMgr.appFinished(attempt1.getApplicationId());
  secretMgr=new NMTokenSecretManagerInNM(stateStore);
  secretMgr.recover();
  secretMgr.setNodeId(nodeId);
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
  assertNotNull(secretMgr.retrievePassword(attemptToken1));
  assertNotNull(secretMgr.retrievePassword(attemptToken2));
  // Roll the key again: attempt 1's issuing key is now gone entirely, so
  // its token password must fail; attempt 2 still has its stored key.
  currentKey=keygen.generateKey();
  secretMgr.setMasterKey(currentKey);
  secretMgr=new NMTokenSecretManagerInNM(stateStore);
  secretMgr.recover();
  secretMgr.setNodeId(nodeId);
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
  assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
  try {
    secretMgr.retrievePassword(attemptToken1);
    fail("attempt token should not still be valid");
  }
 catch (  InvalidToken e) {
    // expected: attempt 1's key was removed by appFinished + key rolls
  }
  assertNotNull(secretMgr.retrievePassword(attemptToken2));
  // Finish app 2 as well: after recovery neither token is usable.
  secretMgr.appFinished(attempt2.getApplicationId());
  secretMgr=new NMTokenSecretManagerInNM(stateStore);
  secretMgr.recover();
  secretMgr.setNodeId(nodeId);
  assertEquals(currentKey,secretMgr.getCurrentKey());
  assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
  assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
  try {
    secretMgr.retrievePassword(attemptToken1);
    fail("attempt token should not still be valid");
  }
 catch (  InvalidToken e) {
    // expected
  }
  try {
    secretMgr.retrievePassword(attemptToken2);
    fail("attempt token should not still be valid");
  }
 catch (  InvalidToken e) {
    // expected
  }
  stateStore.close();
}

Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestContainerLogsPage

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test(timeout=30000) public void testContainerLogDirs() throws IOException, YarnException { File absLogDir=new File("target",TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile(); String logdirwithFile=absLogDir.toURI().toString(); Configuration conf=new Configuration(); conf.set(YarnConfiguration.NM_LOG_DIRS,logdirwithFile); NodeHealthCheckerService healthChecker=new NodeHealthCheckerService(); healthChecker.init(conf); LocalDirsHandlerService dirsHandler=healthChecker.getDiskHandler(); NMContext nmContext=new NodeManager.NMContext(null,null,dirsHandler,new ApplicationACLsManager(conf),new NMNullStateStoreService()); RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(conf); String user="nobody"; long clusterTimeStamp=1234; ApplicationId appId=BuilderUtils.newApplicationId(recordFactory,clusterTimeStamp,1); Application app=mock(Application.class); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1); ContainerId container1=BuilderUtils.newContainerId(recordFactory,appId,appAttemptId,0); nmContext.getApplications().put(appId,app); MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),conf,user,appId,1); container.setState(ContainerState.RUNNING); nmContext.getContainers().put(container1,container); List files=null; files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext); Assert.assertTrue(!(files.get(0).toString().contains("file:"))); nmContext.getContainers().remove(container1); Assert.assertNull(nmContext.getContainers().get(container1)); files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext); Assert.assertTrue(!(files.get(0).toString().contains("file:"))); }

Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServices

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidUri2() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.accept(MediaType.APPLICATION_JSON).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidUri() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.path("ws").path("v1").path("node").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the containerlogs REST endpoint: a log file written under the
 * container's log dir is served verbatim; a nonexistent log name yields a
 * 404 with a descriptive message; and the log remains fetchable even after
 * the container has been removed from the NM context.
 * NOTE(review): containerIdStr rebuilds the id via a second
 * BuilderUtils.newContainerId(0,0,0,0) call instead of containerId.toString()
 * — equivalent here, but worth simplifying upstream.
 */
@Test public void testContainerLogs() throws IOException {
  WebResource r=resource();
  final ContainerId containerId=BuilderUtils.newContainerId(0,0,0,0);
  final String containerIdStr=BuilderUtils.newContainerId(0,0,0,0).toString();
  final ApplicationAttemptId appAttemptId=containerId.getApplicationAttemptId();
  final ApplicationId appId=appAttemptId.getApplicationId();
  final String appIdStr=appId.toString();
  final String filename="logfile1";
  final String logMessage="log message\n";
  // Register the application and a running mock container in the context.
  nmContext.getApplications().put(appId,new ApplicationImpl(null,"user",appId,null,nmContext));
  MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),new Configuration(),"user",appId,1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId,container);
  // Write the log file where the web service will look for it.
  Path path=dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr,containerIdStr) + "/" + filename,false);
  File logFile=new File(path.toUri().getPath());
  logFile.deleteOnExit();
  assertTrue("Failed to create log dir",logFile.getParentFile().mkdirs());
  PrintWriter pw=new PrintWriter(logFile);
  pw.print(logMessage);
  pw.close();
  // Fetching the existing log returns its exact contents.
  ClientResponse response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  String responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
  // Fetching a nonexistent log name yields 404 with an explanation.
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path("uhhh").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  Assert.assertEquals(Status.NOT_FOUND.getStatusCode(),response.getStatus());
  responseText=response.getEntity(String.class);
  assertTrue(responseText.contains("Cannot find this log on the local disk."));
  // Even after the container is gone from the context, the log file on
  // disk must still be served.
  nmContext.getContainers().remove(containerId);
  Assert.assertNull(nmContext.getContainers().get(containerId));
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidAccept() throws JSONException, Exception { WebResource r=resource(); String responseStr=""; try { responseStr=r.path("ws").path("v1").path("node").accept(MediaType.TEXT_PLAIN).get(String.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.INTERNAL_SERVER_ERROR,response.getClientResponseStatus()); WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr); } }

Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServicesApps

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testNodeSingleAppsMissing() throws JSONException, Exception { WebResource r=resource(); Application app=new MockApp(1); nmContext.getApplications().put(app.getAppId(),app); addAppContainers(app); Application app2=new MockApp(2); nmContext.getApplications().put(app2.getAppId(),app2); addAppContainers(app2); try { r.path("ws").path("v1").path("node").path("apps").path("application_1234_0009").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid user query"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id application_1234_0009 not found",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Queries the apps listing with an empty "user" query parameter and verifies
// the service rejects it: HTTP 400 with a BadRequestException RemoteException.
@Test
public void testNodeAppsUserEmpty() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  // Second app owned by user "foo" so a valid user filter would have matches.
  Application app2 = new MockApp("foo", 1234, 2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("apps").queryParam("user", "")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: Error: You must specify a non-empty string for the user", message);
    WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Queries the apps listing with an unknown state value and no explicit Accept
// header, verifying the default response format still carries the standard
// BadRequest RemoteException JSON.
@Test
public void testNodeAppsStateInvalidDefault() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp("foo", 1234, 2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    // Note: no .accept(...) — exercises the default media type path.
    r.path("ws").path("v1").path("node").path("apps")
        .queryParam("state", "FOO_STATE").get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    // Shared helper checks the invalid-state message/type/class triple.
    verifyStateInvalidException(message, type, classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Same invalid-state query as testNodeAppsStateInvalidDefault, but with an
// explicit JSON Accept header.
@Test
public void testNodeAppsStateInvalid() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp("foo", 1234, 2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("apps")
        .queryParam("state", "FOO_STATE")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    verifyStateInvalidException(message, type, classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Looks up a malformed app id ("app_foo_0000"); the non-numeric cluster
// timestamp "foo" makes id parsing fail, surfacing as a 400 with a
// NumberFormatException RemoteException.
@Test
public void testNodeSingleAppsInvalid() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp(2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("apps").path("app_foo_0000")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "For input string: \"foo\"", message);
    WebServicesTestUtils.checkStringMatch("exception type", "NumberFormatException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "java.lang.NumberFormatException", classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// XML variant of the invalid-state query: the error body is parsed as a DOM
// document and the RemoteException element fields are verified.
@Test
public void testNodeAppsStateInvalidXML() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp("foo", 1234, 2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("apps")
        .queryParam("state", "FOO_STATE")
        .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    // Parse the XML error body and extract the RemoteException element.
    String msg = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(msg));
    Document dom = db.parse(is);
    NodeList nodes = dom.getElementsByTagName("RemoteException");
    Element element = (Element) nodes.item(0);
    String message = WebServicesTestUtils.getXmlString(element, "message");
    String type = WebServicesTestUtils.getXmlString(element, "exception");
    String classname = WebServicesTestUtils.getXmlString(element, "javaClassName");
    verifyStateInvalidException(message, type, classname);
  }
}

Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServicesContainers

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Looks up a container id with a non-numeric component ("container_foo_1234");
// the service must reject it as a 400 BadRequestException.
@Test
public void testSingleContainerInvalid() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp(2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("containers").path("container_foo_1234")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: invalid container id, container_foo_1234", message);
    WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Looks up a syntactically valid container id that does not exist on this
// node; the service must answer 404 NotFoundException (not 400).
@Test
public void testSingleContainerWrong() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp(2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("containers")
        .path("container_1234_0001_01_000005")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: container with id, container_1234_0001_01_000005, not found",
        message);
    WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Looks up a truncated container id ("container_1234_0001" — too few parts);
// the service must reject it as a 400 BadRequestException.
@Test
public void testSingleContainerInvalid2() throws JSONException, Exception {
  WebResource r = resource();
  Application app = new MockApp(1);
  nmContext.getApplications().put(app.getAppId(), app);
  addAppContainers(app);
  Application app2 = new MockApp(2);
  nmContext.getApplications().put(app2.getAppId(), app2);
  addAppContainers(app2);
  try {
    r.path("ws").path("v1").path("node").path("containers").path("container_1234_0001")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid user query");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: invalid container id, container_1234_0001", message);
    WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// End-to-end check that an AM holding a valid AMRMToken can register with the
// RM, and that the register response carries the client-to-AM master key and
// the ACLs the app was submitted with.
@Test
public void testAuthorizedAccess() throws Exception {
  MyContainerManager containerManager = new MyContainerManager();
  rm = new MockRMWithAMS(conf, containerManager);
  rm.start();
  MockNM nm1 = rm.registerNode("localhost:1234", 5120);
  Map acls = new HashMap(2);
  acls.put(ApplicationAccessType.VIEW_APP, "*");
  RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
  nm1.nodeHeartbeat(true);
  // Poll (up to ~20s) until the AM launch delivers container tokens.
  int waitCount = 0;
  while (containerManager.containerTokens == null && waitCount++ < 20) {
    LOG.info("Waiting for AM Launch to happen..");
    Thread.sleep(1000);
  }
  Assert.assertNotNull(containerManager.containerTokens);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
  waitForLaunchedState(attempt);
  // Build an AM-side RPC client as a remote user carrying the AMRMToken
  // extracted from the launch credentials.
  final Configuration conf = rm.getConfig();
  final YarnRPC rpc = YarnRPC.create(conf);
  UserGroupInformation currentUser =
      UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
  Credentials credentials = containerManager.getContainerCredentials();
  final InetSocketAddress rmBindAddress =
      rm.getApplicationMasterService().getBindAddress();
  Token amRMToken =
      MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress, credentials.getAllTokens());
  currentUser.addToken(amRMToken);
  ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction() {
    @Override
    public ApplicationMasterProtocol run() {
      return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class,
          rm.getApplicationMasterService().getBindAddress(), conf);
    }
  });
  RegisterApplicationMasterRequest request =
      Records.newRecord(RegisterApplicationMasterRequest.class);
  RegisterApplicationMasterResponse response = client.registerApplicationMaster(request);
  Assert.assertNotNull(response.getClientToAMTokenMasterKey());
  if (UserGroupInformation.isSecurityEnabled()) {
    // In secure mode the master key must carry actual material.
    Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
  }
  Assert.assertEquals("Register response has bad ACLs", "*",
      response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
}

BranchVerifier UtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test public void testUnauthorizedAccess() throws Exception { MyContainerManager containerManager=new MyContainerManager(); rm=new MockRMWithAMS(conf,containerManager); rm.start(); MockNM nm1=rm.registerNode("localhost:1234",5120); RMApp app=rm.submitApp(1024); nm1.nodeHeartbeat(true); int waitCount=0; while (containerManager.containerTokens == null && waitCount++ < 40) { LOG.info("Waiting for AM Launch to happen.."); Thread.sleep(1000); } Assert.assertNotNull(containerManager.containerTokens); RMAppAttempt attempt=app.getCurrentAppAttempt(); ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId(); waitForLaunchedState(attempt); final Configuration conf=rm.getConfig(); final YarnRPC rpc=YarnRPC.create(conf); final InetSocketAddress serviceAddr=conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString()); ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){ @Override public ApplicationMasterProtocol run(){ return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,serviceAddr,conf); } } ); RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class); try { client.registerApplicationMaster(request); Assert.fail("Should fail with authorization error"); } catch ( Exception e) { if (isCause(AccessControlException.class,e)) { String expectedMessage=""; if (UserGroupInformation.isSecurityEnabled()) { expectedMessage="Client cannot authenticate via:[TOKEN]"; } else { expectedMessage="SIMPLE authentication is not enabled. Available:[TOKEN]"; } Assert.assertTrue(e.getCause().getMessage().contains(expectedMessage)); } else { throw e; } } }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestAppManager

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Submitting an application whose id already exists in the RM must be
 * rejected, and the pre-existing application must be left untouched.
 */
@Test(timeout = 30000)
public void testRMAppSubmitDuplicateApplicationId() throws Exception {
  ApplicationId appId = MockApps.newAppID(0);
  asContext.setApplicationId(appId);
  RMApp appOrig = rmContext.getRMApps().get(appId);
  // Fixed: compare String contents with equals(); the original used '!=',
  // which only checks reference identity and could pass/fail on interning
  // rather than on the actual name.
  Assert.assertTrue("app name matches but shouldn't",
      !"testApp1".equals(appOrig.getName()));
  try {
    appMonitor.submitApplication(asContext, "test");
    Assert.fail("Exception is expected when applicationId is duplicate.");
  } catch (YarnException e) {
    Assert.assertTrue("The thrown exception is not the expected one.",
        e.getMessage().contains("Cannot add a duplicate!"));
  }
  // The original app must still be registered and in its original state.
  RMApp app = rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null", app);
  Assert.assertEquals("app id doesn't match", appId, app.getApplicationId());
  Assert.assertEquals("app state doesn't match", RMAppState.FINISHED, app.getState());
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * An application whose resource request exceeds the scheduler's maximum
 * allocation must be rejected with an InvalidResourceRequestException.
 */
@Test(timeout = 30000)
public void testRMAppSubmitInvalidResourceRequest() throws Exception {
  asContext.setResource(Resources.createResource(
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1));
  boolean rejected = false;
  try {
    appMonitor.submitApplication(asContext, "test");
  } catch (YarnException e) {
    rejected = true;
    Assert.assertTrue("The thrown exception is not" + " InvalidResourceRequestException",
        e.getMessage().contains("Invalid resource request"));
  }
  if (!rejected) {
    Assert.fail("Application submission should fail because resource"
        + " request is invalid.");
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testRMAppSubmit() throws Exception { appMonitor.submitApplication(asContext,"test"); RMApp app=rmContext.getRMApps().get(appId); Assert.assertNotNull("app is null",app); Assert.assertEquals("app id doesn't match",appId,app.getApplicationId()); Assert.assertEquals("app state doesn't match",RMAppState.NEW,app.getState()); int timeoutSecs=0; while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) { Thread.sleep(1000); } Assert.assertEquals("app event type sent is wrong",RMAppEventType.START,getAppEventType()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationCleanup

TestInitializer BooleanVerifier HybridVerifier 
@Before public void setup() throws UnknownHostException { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); conf=new YarnConfiguration(); UserGroupInformation.setConfiguration(conf); conf.set(YarnConfiguration.RECOVERY_ENABLED,"true"); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationMasterLauncher

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Full AM lifecycle: launch reaches the container manager with the expected
// metadata, the AM registers/unregisters, and cleanup is eventually invoked.
@Test
public void testAMLaunchAndCleanup() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MyContainerManagerImpl containerManager = new MyContainerManagerImpl();
  MockRMWithCustomAMLauncher rm = new MockRMWithCustomAMLauncher(containerManager);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 5120);
  RMApp app = rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  // Poll (up to ~20s) until the launcher hits the container manager.
  int waitCount = 0;
  while (containerManager.launched == false && waitCount++ < 20) {
    LOG.info("Waiting for AM Launch to happen..");
    Thread.sleep(1000);
  }
  Assert.assertTrue(containerManager.launched);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  ApplicationAttemptId appAttemptId = attempt.getAppAttemptId();
  // The launcher must have forwarded attempt id, submit time, master
  // container id, NM host, and max-attempts to the container manager.
  Assert.assertEquals(appAttemptId.toString(),
      containerManager.attemptIdAtContainerManager);
  Assert.assertEquals(app.getSubmitTime(),
      containerManager.submitTimeAtContainerManager);
  Assert.assertEquals(
      app.getRMAppAttempt(appAttemptId).getMasterContainer().getId().toString(),
      containerManager.containerIdAtContainerManager);
  Assert.assertEquals(nm1.getNodeId().toString(),
      containerManager.nmHostAtContainerManager);
  Assert.assertEquals(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS,
      containerManager.maxAppAttempts);
  MockAM am = new MockAM(rm.getRMContext(), rm.getApplicationMasterService(),
      appAttemptId);
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);
  // Poll (up to ~20s) until cleanup reaches the container manager.
  waitCount = 0;
  while (containerManager.cleanedup == false && waitCount++ < 20) {
    LOG.info("Waiting for AM Cleanup to happen..");
    Thread.sleep(1000);
  }
  Assert.assertTrue(containerManager.cleanedup);
  am.waitForState(RMAppAttemptState.FINISHED);
  rm.stop();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the ApplicationMasterService's responses around registration
 * ordering: allocate before registering yields AM_RESYNC, double-register
 * is rejected, and allocate after FINISHED yields AM_SHUTDOWN.
 */
@SuppressWarnings("unused")
@Test(timeout = 100000)
public void testallocateBeforeAMRegistration() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  boolean thrown = false;
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM();
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  // Allocate before registerApplicationMaster: RM must ask for a resync.
  int request = 2;
  AllocateResponse ar = am.allocate("h1", 1000, request, new ArrayList());
  Assert.assertTrue(ar.getAMCommand() == AMCommand.AM_RESYNC);
  nm1.nodeHeartbeat(true);
  AllocateResponse amrs = am.allocate(new ArrayList(), new ArrayList());
  // Fixed: the original re-asserted 'ar' here (copy-paste bug), leaving the
  // fresh 'amrs' response unchecked. The second allocate, still before
  // registration, must also be told to resync.
  Assert.assertTrue(amrs.getAMCommand() == AMCommand.AM_RESYNC);
  am.registerAppAttempt();
  // Registering a second time must fail with a clear message.
  thrown = false;
  try {
    am.registerAppAttempt(false);
  } catch (Exception e) {
    Assert.assertEquals("Application Master is already registered : "
        + attempt.getAppAttemptId().getApplicationId(), e.getMessage());
    thrown = true;
  }
  Assert.assertTrue(thrown);
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);
  // Allocate after the attempt finished: RM must order a shutdown.
  AllocateResponse amrs2 = am.allocate(new ArrayList(), new ArrayList());
  Assert.assertTrue(amrs2.getAMCommand() == AMCommand.AM_SHUTDOWN);
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationMasterService

APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test(timeout=1200000) public void testFinishApplicationMasterBeforeRegistering() throws Exception { MockRM rm=new MockRM(conf); try { rm.start(); MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB); RMApp app1=rm.submitApp(2048); MockAM am1=MockRM.launchAM(app1,rm,nm1); FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.FAILED,"",""); Throwable cause=null; try { am1.unregisterAppAttempt(req,false); } catch ( Exception e) { cause=e.getCause(); } Assert.assertNotNull(cause); Assert.assertTrue(cause instanceof ApplicationMasterNotRegisteredException); Assert.assertNotNull(cause.getMessage()); Assert.assertTrue(cause.getMessage().contains("Application Master is trying to unregister before registering for:")); am1.registerAppAttempt(); am1.unregisterAppAttempt(req,false); } finally { if (rm != null) { rm.stop(); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// An AM attempting to release a container that belongs to a DIFFERENT
// application attempt must get an InvalidContainerReleaseException.
@Test(timeout = 600000)
public void testInvalidContainerReleaseRequest() throws Exception {
  MockRM rm = new MockRM(conf);
  try {
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    // App 1 acquires a container.
    RMApp app1 = rm.submitApp(1024);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();
    am1.addRequests(new String[]{"127.0.0.1"}, GB, 1, 1);
    AllocateResponse alloc1Response = am1.schedule();
    nm1.nodeHeartbeat(true);
    while (alloc1Response.getAllocatedContainers().size() < 1) {
      LOG.info("Waiting for containers to be created for app 1...");
      sleep(1000);
      alloc1Response = am1.schedule();
    }
    Assert.assertTrue(alloc1Response.getAllocatedContainers().size() > 0);
    // App 2's AM tries to release app 1's container.
    RMApp app2 = rm.submitApp(1024);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();
    ContainerId cId = alloc1Response.getAllocatedContainers().get(0).getId();
    am2.addContainerToBeReleased(cId);
    try {
      am2.schedule();
      Assert.fail("Exception was expected!!");
    } catch (InvalidContainerReleaseException e) {
      StringBuilder sb = new StringBuilder("Cannot release container : ");
      sb.append(cId.toString());
      sb.append(" not belonging to this application attempt : ");
      sb.append(attempt2.getAppAttemptId().toString());
      Assert.assertTrue(e.getMessage().contains(sb.toString()));
    }
  } finally {
    if (rm != null) {
      rm.stop();
    }
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestClientRMService

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Force-killing an application id the RM has never seen must raise
 * ApplicationNotFoundException with the expected message.
 */
@Test
public void testForceKillNonExistingApplication() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  // An empty app map guarantees the lookup misses.
  when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap());
  ClientRMService rmService =
      new ClientRMService(rmContext, null, null, null, null, null);
  ApplicationId applicationId =
      BuilderUtils.newApplicationId(System.currentTimeMillis(), 0);
  KillApplicationRequest request = KillApplicationRequest.newInstance(applicationId);
  try {
    rmService.forceKillApplication(request);
    Assert.fail();
  } catch (ApplicationNotFoundException ex) {
    // Fixed: assertEquals takes (expected, actual); the original passed them
    // swapped, which produces a misleading diagnostic on failure.
    Assert.assertEquals("Trying to kill an absent "
        + "application " + request.getApplicationId(), ex.getMessage());
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requesting a report for an application id the RM does not know must raise
 * ApplicationNotFoundException with the expected message.
 */
@Test
public void testGetApplicationReport() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  // An empty app map guarantees the lookup misses.
  when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap());
  ClientRMService rmService =
      new ClientRMService(rmContext, null, null, null, null, null);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  GetApplicationReportRequest request =
      recordFactory.newRecordInstance(GetApplicationReportRequest.class);
  request.setApplicationId(ApplicationId.newInstance(0, 0));
  try {
    rmService.getApplicationReport(request);
    Assert.fail();
  } catch (ApplicationNotFoundException ex) {
    // Fixed: assertEquals takes (expected, actual); the original passed them
    // swapped, which produces a misleading diagnostic on failure.
    Assert.assertEquals("Application with id '" + request.getApplicationId()
        + "' doesn't exist in RM.", ex.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testGetContainerReport() throws YarnException, IOException { ClientRMService rmService=createRMService(); RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(null); GetContainerReportRequest request=recordFactory.newRecordInstance(GetContainerReportRequest.class); ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1); ContainerId containerId=ContainerId.newInstance(attemptId,1); request.setContainerId(containerId); try { GetContainerReportResponse response=rmService.getContainerReport(request); Assert.assertEquals(containerId,response.getContainerReport().getContainerId()); } catch ( ApplicationNotFoundException ex) { Assert.fail(ex.getMessage()); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Kills one managed-AM app (requires repeated kill attempts until the RM
// acknowledges completion) and one unmanaged-AM app (acknowledged true on the
// first call), then verifies both end up in KILLED state.
@Test
public void testForceKillApplication() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm = new MockRM();
  rm.init(conf);
  rm.start();
  ClientRMService rmService = rm.getClientRMService();
  GetApplicationsRequest getRequest =
      GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED));
  RMApp app1 = rm.submitApp(1024);
  // Second arg true — presumably submits with an unmanaged AM; verify
  // against the MockRM.submitApp overload.
  RMApp app2 = rm.submitApp(1024, true);
  assertEquals("Incorrect number of apps in the RM", 0,
      rmService.getApplications(getRequest).getApplicationList().size());
  KillApplicationRequest killRequest1 =
      KillApplicationRequest.newInstance(app1.getApplicationId());
  KillApplicationRequest killRequest2 =
      KillApplicationRequest.newInstance(app2.getApplicationId());
  // Retry the kill until the RM reports it completed (bounded at 100 tries).
  int killAttemptCount = 0;
  for (int i = 0; i < 100; i++) {
    KillApplicationResponse killResponse1 =
        rmService.forceKillApplication(killRequest1);
    killAttemptCount++;
    if (killResponse1.getIsKillCompleted()) {
      break;
    }
    Thread.sleep(10);
  }
  assertTrue("Kill attempt count should be greater than 1 for managed AMs",
      killAttemptCount > 1);
  assertEquals("Incorrect number of apps in the RM", 1,
      rmService.getApplications(getRequest).getApplicationList().size());
  KillApplicationResponse killResponse2 =
      rmService.forceKillApplication(killRequest2);
  assertTrue("Killing UnmanagedAM should falsely acknowledge true",
      killResponse2.getIsKillCompleted());
  // Wait (bounded) for both apps to show up as KILLED.
  for (int i = 0; i < 100; i++) {
    if (2 == rmService.getApplications(getRequest).getApplicationList().size()) {
      break;
    }
    Thread.sleep(10);
  }
  assertEquals("Incorrect number of apps in the RM", 2,
      rmService.getApplications(getRequest).getApplicationList().size());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Submits apps through ClientRMService against a fully mocked RMContext:
// defaults are applied when name/queue are null, resubmitting the same id is
// tolerated, and getApplications filters by application type.
@Test(timeout = 30000)
@SuppressWarnings("rawtypes")
public void testAppSubmit() throws Exception {
  YarnScheduler yarnScheduler = mockYarnScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(yarnScheduler, rmContext);
  RMStateStore stateStore = mock(RMStateStore.class);
  when(rmContext.getStateStore()).thenReturn(stateStore);
  RMAppManager appManager = new RMAppManager(rmContext, yarnScheduler, null,
      mock(ApplicationACLsManager.class), new Configuration());
  // Swallow dispatched events — this test only inspects the app map.
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler() {
    public void handle(Event event) {
    }
  });
  ApplicationId appId1 = getApplicationId(100);
  ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class);
  when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),
      ApplicationAccessType.VIEW_APP, null, appId1)).thenReturn(true);
  QueueACLsManager mockQueueACLsManager = mock(QueueACLsManager.class);
  when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),
      any(QueueACL.class), anyString())).thenReturn(true);
  ClientRMService rmService = new ClientRMService(rmContext, yarnScheduler,
      appManager, mockAclsManager, mockQueueACLsManager, null);
  // Submission with null name/queue must fall back to the defaults.
  SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null);
  try {
    rmService.submitApplication(submitRequest1);
  } catch (YarnException e) {
    Assert.fail("Exception is not expected.");
  }
  RMApp app1 = rmContext.getRMApps().get(appId1);
  Assert.assertNotNull("app doesn't exist", app1);
  Assert.assertEquals("app name doesn't match",
      YarnConfiguration.DEFAULT_APPLICATION_NAME, app1.getName());
  Assert.assertEquals("app queue doesn't match",
      YarnConfiguration.DEFAULT_QUEUE_NAME, app1.getQueue());
  // Submission with explicit name/queue/type must preserve them.
  String name = MockApps.newAppName();
  String queue = MockApps.newQueue();
  ApplicationId appId2 = getApplicationId(101);
  SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, name, queue);
  submitRequest2.getApplicationSubmissionContext().setApplicationType("matchType");
  try {
    rmService.submitApplication(submitRequest2);
  } catch (YarnException e) {
    Assert.fail("Exception is not expected.");
  }
  RMApp app2 = rmContext.getRMApps().get(appId2);
  Assert.assertNotNull("app doesn't exist", app2);
  Assert.assertEquals("app name doesn't match", name, app2.getName());
  Assert.assertEquals("app queue doesn't match", queue, app2.getQueue());
  // Resubmitting the same request must not throw.
  try {
    rmService.submitApplication(submitRequest2);
  } catch (YarnException e) {
    Assert.fail("Exception is not expected.");
  }
  // Empty type set returns everything; "matchType" filters to app2 only.
  GetApplicationsRequest getAllAppsRequest =
      GetApplicationsRequest.newInstance(new HashSet());
  GetApplicationsResponse getAllApplicationsResponse =
      rmService.getApplications(getAllAppsRequest);
  Assert.assertEquals(5, getAllApplicationsResponse.getApplicationList().size());
  Set appTypes = new HashSet();
  appTypes.add("matchType");
  getAllAppsRequest = GetApplicationsRequest.newInstance(appTypes);
  getAllApplicationsResponse = rmService.getApplications(getAllAppsRequest);
  Assert.assertEquals(1, getAllApplicationsResponse.getApplicationList().size());
  Assert.assertEquals(appId2,
      getAllApplicationsResponse.getApplicationList().get(0).getApplicationId());
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// A user renewing a delegation token whose designated renewer is someone else
// must be rejected; the test only passes if checkTokenRenewal throws.
@Test
public void testTokenRenewalWrongUser() throws Exception {
  try {
    owner.doAs(new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try {
          // 'owner' attempts renewal of a token whose renewer is 'other'.
          checkTokenRenewal(owner, other);
          return null;
        } catch (YarnException ex) {
          Assert.assertTrue(ex.getMessage().contains(owner.getUserName()
              + " tries to renew a token with renewer " + other.getUserName()));
          throw ex;
        }
      }
    });
  } catch (Exception e) {
    // Expected path: the renewal was rejected.
    return;
  }
  Assert.fail("renew should have failed");
}

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies GetClusterNodes filtering by NodeState against a MockRM over a real
 * RPC proxy: one node stays RUNNING, one is transitioned to LOST, and the
 * running node later sends an unhealthy heartbeat. A RUNNING query must see
 * only healthy running nodes, an UNHEALTHY query must return exactly the
 * unhealthy node, and an all-states query must see all three registered hosts.
 */
@Test public void testGetClusterNodes() throws Exception { MockRM rm=new MockRM(){ protected ClientRMService createClientRMService(){ return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,this.getRMContext().getRMDelegationTokenSecretManager()); } } ; rm.start(); MockNM node=rm.registerNode("host1:1234",1024); rm.sendNodeStarted(node); node.nodeHeartbeat(true); MockNM lostNode=rm.registerNode("host2:1235",1024); rm.sendNodeStarted(lostNode); lostNode.nodeHeartbeat(true); rm.NMwaitForState(lostNode.getNodeId(),NodeState.RUNNING); rm.sendNodeLost(lostNode); Configuration conf=new Configuration(); YarnRPC rpc=YarnRPC.create(conf); InetSocketAddress rmAddress=rm.getClientRMService().getBindAddress(); LOG.info("Connecting to ResourceManager at " + rmAddress); ApplicationClientProtocol client=(ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class,rmAddress,conf); GetClusterNodesRequest request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING)); List nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(1,nodeReports.size()); Assert.assertNotSame("Node is expected to be healthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState()); node.nodeHeartbeat(false); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals("Unhealthy nodes should not show up by default",0,nodeReports.size()); request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY)); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(1,nodeReports.size()); Assert.assertEquals("Node is expected to be unhealthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState()); rm.registerNode("host3:1236",1024); request=GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class)); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(3,nodeReports.size()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Fetches the report for a known application attempt and checks that the
 * response echoes the requested attempt id; an ApplicationNotFoundException
 * fails the test.
 */
@Test public void testGetApplicationAttemptReport() throws YarnException, IOException {
  ClientRMService service = createRMService();
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  GetApplicationAttemptReportRequest request =
      factory.newRecordInstance(GetApplicationAttemptReportRequest.class);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  request.setApplicationAttemptId(attemptId);
  try {
    GetApplicationAttemptReportResponse response =
        service.getApplicationAttemptReport(request);
    Assert.assertEquals(attemptId,
        response.getApplicationAttemptReport().getApplicationAttemptId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests all attempts of a known application and verifies that exactly one
 * attempt with the expected id is returned; an ApplicationNotFoundException
 * fails the test.
 */
@Test public void testGetApplicationAttempts() throws YarnException, IOException {
  ClientRMService service = createRMService();
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  GetApplicationAttemptsRequest request =
      factory.newRecordInstance(GetApplicationAttemptsRequest.class);
  // Attempt id expected for the app set up by createRMService().
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  request.setApplicationId(ApplicationId.newInstance(123456, 1));
  try {
    GetApplicationAttemptsResponse response = service.getApplicationAttempts(request);
    Assert.assertEquals(1, response.getApplicationAttemptList().size());
    Assert.assertEquals(attemptId,
        response.getApplicationAttemptList().get(0).getApplicationAttemptId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Token cancellation must be rejected when attempted by a user who is neither
 * the token owner nor its renewer. The cross product of owners x renewers is
 * exercised twice: once with testerKerb (kerberos-authenticated caller) and
 * once with tester (simple-auth caller). Every attempt must fail with a
 * YarnException whose message says the caller "is not authorized to cancel
 * the token"; reaching past checkTokenCancellation without an exception fails
 * the test.
 */
@Test public void testTokenCancellationByWrongUser(){ RMContext rmContext=mock(RMContext.class); final ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,dtsm); UserGroupInformation[] kerbTestOwners={owner,other,tester,ownerKerb,otherKerb}; UserGroupInformation[] kerbTestRenewers={owner,other,ownerKerb,otherKerb}; for ( final UserGroupInformation tokOwner : kerbTestOwners) { for ( final UserGroupInformation tokRenewer : kerbTestRenewers) { try { testerKerb.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { checkTokenCancellation(rmService,tokOwner,tokRenewer); Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName()); return null; } catch ( YarnException e) { Assert.assertTrue(e.getMessage().contains(testerKerb.getUserName() + " is not authorized to cancel the token")); return null; } } } ); } catch ( Exception e) { Assert.fail("Unexpected exception; " + e.getMessage()); } } } UserGroupInformation[] simpleTestOwners={owner,other,ownerKerb,otherKerb,testerKerb}; UserGroupInformation[] simpleTestRenewers={owner,other,ownerKerb,otherKerb}; for ( final UserGroupInformation tokOwner : simpleTestOwners) { for ( final UserGroupInformation tokRenewer : simpleTestRenewers) { try { tester.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { checkTokenCancellation(tokOwner,tokRenewer); Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName()); return null; } catch ( YarnException ex) { Assert.assertTrue(ex.getMessage().contains(tester.getUserName() + " is not authorized to cancel the token")); return null; } } } ); } catch ( Exception e) { Assert.fail("Unexpected exception; " + e.getMessage()); } } } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests the containers of a known application attempt and verifies the
 * first returned container id; an ApplicationNotFoundException fails the test.
 */
@Test public void testGetContainers() throws YarnException, IOException {
  ClientRMService service = createRMService();
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  GetContainersRequest request =
      factory.newRecordInstance(GetContainersRequest.class);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  request.setApplicationAttemptId(attemptId);
  try {
    GetContainersResponse response = service.getContainers(request);
    Assert.assertEquals(containerId,
        response.getContainerList().get(0).getContainerId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestClientRMTokens

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Full RM delegation-token lifecycle against a real ClientRMService:
 * fetch a token as a kerberos-authenticated renewer and use it over RPC;
 * renew it before the initial interval elapses and confirm it still works;
 * wait past the renewed expiry and confirm the RPC fails with InvalidToken
 * ("is expired"); then fetch a fresh token, cancel it, and confirm subsequent
 * use fails. Timing is driven by initialInterval/maxLifetime/renewInterval
 * (10s/20s/10s) with sleep-poll loops, so wall-clock sensitivity is inherent.
 * NOTE(review): the final cancelled-token check deliberately swallows both
 * IOException and YarnException without asserting a message — best-effort.
 */
@Test public void testDelegationToken() throws IOException, InterruptedException { final YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.RM_PRINCIPAL,"testuser/localhost@apache.org"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); ResourceScheduler scheduler=createMockScheduler(conf); long initialInterval=10000l; long maxLifetime=20000l; long renewInterval=10000l; RMDelegationTokenSecretManager rmDtSecretManager=createRMDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval); rmDtSecretManager.startThreads(); LOG.info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: "+ maxLifetime+ ", renewInterval: "+ renewInterval); final ClientRMService clientRMService=new ClientRMServiceForTest(conf,scheduler,rmDtSecretManager); clientRMService.init(conf); clientRMService.start(); ApplicationClientProtocol clientRMWithDT=null; try { UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG"); Assert.assertEquals("testrenewer",loggedInUser.getShortUserName()); loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS); org.apache.hadoop.yarn.api.records.Token token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName()); long tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser1",conf); GetNewApplicationRequest request=Records.newRecord(GetNewApplicationRequest.class); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) { Thread.sleep(500l); } long nextExpTime=renewDelegationToken(loggedInUser,clientRMService,token); long renewalTime=System.currentTimeMillis(); LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime); while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) { Thread.sleep(500l); } Thread.sleep(50l); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } while (System.currentTimeMillis() < renewalTime + renewInterval) { Thread.sleep(500l); } Thread.sleep(50l); LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid"); try { clientRMWithDT.getNewApplication(request); fail("Should not have succeeded with an expired token"); } catch ( Exception e) { assertEquals(InvalidToken.class.getName(),e.getClass().getName()); assertTrue(e.getMessage().contains("is expired")); } if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); clientRMWithDT=null; } token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf); request=Records.newRecord(GetNewApplicationRequest.class); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } cancelDelegationToken(loggedInUser,clientRMService,token); if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); clientRMWithDT=null; } clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf); LOG.info("Cancelled delegation token at: " + System.currentTimeMillis()); try { clientRMWithDT.getNewApplication(request); fail("Should not have succeeded with a cancelled delegation token"); } catch ( IOException e) { } catch ( YarnException e) { } } finally { rmDtSecretManager.stopThreads(); if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); } } }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * FifoScheduler.serviceInit must reject a configuration whose minimum memory
 * allocation exceeds the maximum, raising a YarnRuntimeException whose message
 * starts with "Invalid resource scheduler memory".
 */
@Test(timeout=30000) public void testConfValidation() throws Exception {
  FifoScheduler scheduler = new FifoScheduler();
  Configuration conf = new YarnConfiguration();
  // Deliberately invalid: min allocation (2048 MB) exceeds max (1024 MB).
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  try {
    scheduler.serviceInit(conf);
    fail("Exception is expected because the min memory allocation is"
        + " larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler memory"));
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestKillApplicationWithRMHA

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Kills an application while an RM failover happens when the app is still in
 * NEW state: the newly active RM has no record of the app, so the kill must
 * fail with an ApplicationNotFoundException naming the absent application.
 */
@Test(timeout=20000) public void testKillAppWhenFailoverHappensAtNewState() throws Exception {
  startRMsWithCustomizedRMAppManager();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  // Submit an app that the customized RMAppManager keeps in NEW state.
  RMApp app0 = rm1.submitApp(200, "",
      UserGroupInformation.getCurrentUser().getShortUserName(), null, false, null,
      configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),
      null, null, false, false);
  try {
    failOverAndKillApp(app0.getApplicationId(), RMAppState.NEW);
    fail("Should get an exception here");
  } catch (ApplicationNotFoundException ex) {
    Assert.assertTrue(ex.getMessage().contains(
        "Trying to kill an absent application " + app0.getApplicationId()));
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestMoveApplication

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Moving an application after it has been killed must be rejected with a
 * YarnException stating that an app in KILLED state cannot be moved.
 */
@Test(timeout=10000) public void testMoveTooLate() throws Exception {
  Application application = new Application("user1", resourceManager);
  ApplicationId appId = application.getApplicationId();
  application.submit();
  ClientRMService clientRMService = resourceManager.getClientRMService();
  clientRMService.forceKillApplication(KillApplicationRequest.newInstance(appId));
  RMApp rmApp = resourceManager.getRMContext().getRMApps().get(appId);
  // Poll until the kill takes effect; the @Test timeout bounds this loop.
  while (rmApp.getState() != RMAppState.KILLED) {
    Thread.sleep(100);
  }
  try {
    clientRMService.moveApplicationAcrossQueues(
        MoveApplicationAcrossQueuesRequest.newInstance(appId, "newqueue"));
    fail("Should have hit exception");
  } catch (YarnException ex) {
    assertEquals(YarnException.class, ex.getClass());
    assertEquals("App in KILLED state cannot be moved.", ex.getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * When the scheduler refuses a queue move (failMove flag set), the
 * ClientRMService must surface a YarnException whose cause carries the
 * scheduler's "Move not supported" message.
 */
@Test public void testMoveRejectedByScheduler() throws Exception {
  failMove = true;
  Application application = new Application("user1", resourceManager);
  application.submit();
  RMApp app = resourceManager.rmContext.getRMApps().get(application.getApplicationId());
  // Wait for the app to be accepted before attempting the move.
  while (app.getState() != RMAppState.ACCEPTED) {
    Thread.sleep(100);
  }
  ClientRMService clientRMService = resourceManager.getClientRMService();
  try {
    clientRMService.moveApplicationAcrossQueues(
        MoveApplicationAcrossQueuesRequest.newInstance(
            application.getApplicationId(), "newqueue"));
    fail("Should have hit exception");
  } catch (YarnException ex) {
    assertEquals("Move not supported", ex.getCause().getMessage());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A queue move requested by a user without queue permissions must be rejected:
 * the exception chain propagated out of doAs must bottom out in an
 * AccessControlException.
 */
@Test public void testMoveRejectedByPermissions() throws Exception {
  failMove = true;
  final Application application = new Application("user1", resourceManager);
  application.submit();
  final ClientRMService clientRMService = resourceManager.getClientRMService();
  try {
    // Issue the move as a different (unauthorized) remote user.
    UserGroupInformation.createRemoteUser("otheruser").doAs(
        new PrivilegedExceptionAction() {
          @Override
          public MoveApplicationAcrossQueuesResponse run() throws Exception {
            return clientRMService.moveApplicationAcrossQueues(
                MoveApplicationAcrossQueuesRequest.newInstance(
                    application.getApplicationId(), "newqueue"));
          }
        });
    fail("Should have hit exception");
  } catch (Exception ex) {
    assertEquals(AccessControlException.class, ex.getCause().getCause().getClass());
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestRM

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After an application fails (its AM container completes abnormally) or is
 * killed, its report must no longer expose the AM endpoint: host becomes
 * "N/A" and rpcPort -1. A successfully finished app (app1) must keep its real
 * host/port. Verified via getApplications filtered to FINISHED/KILLED/FAILED,
 * which must return all three submitted apps.
 */
@Test(timeout=80000) public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MockRM rm1=new MockRM(conf); rm1.start(); RMApp app1=rm1.submitApp(200); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1); RMApp app2=rm1.submitApp(200); MockAM am2=MockRM.launchAndRegisterAM(app2,rm1,nm1); nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE); am2.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app2.getApplicationId(),RMAppState.FAILED); RMApp app3=rm1.submitApp(200); MockAM am3=MockRM.launchAndRegisterAM(app3,rm1,nm1); rm1.killApp(app3.getApplicationId()); rm1.waitForState(app3.getApplicationId(),RMAppState.KILLED); rm1.waitForState(am3.getApplicationAttemptId(),RMAppAttemptState.KILLED); GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED)); GetApplicationsResponse response1=rm1.getClientRMService().getApplications(request1); List appList1=response1.getApplicationList(); Assert.assertEquals(3,appList1.size()); for ( ApplicationReport report : appList1) { if (report.getApplicationId().equals(app2.getApplicationId()) || report.getApplicationId().equals(app3.getApplicationId())) { Assert.assertEquals("N/A",report.getHost()); Assert.assertEquals(-1,report.getRpcPort()); } if (report.getApplicationId().equals(app1.getApplicationId())) { Assert.assertFalse(report.getHost().equals("N/A")); Assert.assertTrue(report.getRpcPort() != -1); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With CapacityScheduler, the NMToken must NOT be issued for the AM container
 * itself: a container id is burned before launch so the master container id
 * is != 1, and the secret manager must have no token for the attempt/node
 * pair right after AM launch. The token is expected to arrive only with the
 * first normally allocated container, targeted at nm1's NodeId.
 */
@Test(timeout=20000) public void testNMTokenSentForNormalContainer() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class.getCanonicalName()); MockRM rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("h1:1234",5120); RMApp app=rm.submitApp(2000); RMAppAttempt attempt=app.getCurrentAppAttempt(); CapacityScheduler cs=(CapacityScheduler)rm.getResourceScheduler(); cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId(); nm1.nodeHeartbeat(true); MockAM am=MockRM.launchAM(app,rm,nm1); Assert.assertTrue(attempt.getMasterContainer().getId().getId() != 1); Assert.assertFalse(rm.getRMContext().getNMTokenSecretManager().isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); am.registerAppAttempt(); rm.waitForState(app.getApplicationId(),RMAppState.RUNNING); int NUM_CONTAINERS=1; List containers=new ArrayList(); List expectedNMTokens=new ArrayList(); while (true) { AllocateResponse response=am.allocate("127.0.0.1",2000,NUM_CONTAINERS,new ArrayList()); nm1.nodeHeartbeat(true); containers.addAll(response.getAllocatedContainers()); expectedNMTokens.addAll(response.getNMTokens()); if (containers.size() == NUM_CONTAINERS) { break; } Thread.sleep(200); System.out.println("Waiting for container to be allocated."); } NodeId nodeId=expectedNMTokens.get(0).getNodeId(); Assert.assertEquals(nm1.getNodeId(),nodeId); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises NMTokenSecretManagerInRM bookkeeping across an application's life:
 * a token is issued once per node and not re-sent for repeat allocations on
 * the same node; re-registering a node clears its token so it is re-issued on
 * the next allocation; rolling and activating the master key wipes per-node
 * token state while the attempt stays registered; unregistering the AM and
 * completing all containers finally deregisters the attempt. Sleep-poll loops
 * (WAIT_SLEEP_MS) wait for heartbeat/cleanup propagation; the @Test timeout
 * bounds them.
 */
@Test(timeout=40000) public void testNMToken() throws Exception { MockRM rm=new MockRM(); try { rm.start(); MockNM nm1=rm.registerNode("h1:1234",10000); NMTokenSecretManagerInRM nmTokenSecretManager=rm.getRMContext().getNMTokenSecretManager(); RMApp app=rm.submitApp(1000); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app.getCurrentAppAttempt(); MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); am.registerAppAttempt(); ArrayList containersReceivedForNM1=new ArrayList(); List releaseContainerList=new ArrayList(); HashMap nmTokens=new HashMap(); AllocateResponse response=am.allocate("h1",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,2,nmTokens,nm1); Assert.assertEquals(1,nmTokens.size()); response=am.allocate("h1",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,4,nmTokens,nm1); Assert.assertEquals(1,nmTokens.size()); MockNM nm2=rm.registerNode("h2:1234",10000); nm2.nodeHeartbeat(true); ArrayList containersReceivedForNM2=new ArrayList(); response=am.allocate("h2",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,2,nmTokens,nm2); Assert.assertEquals(2,nmTokens.size()); nm2=rm.registerNode("h2:1234",10000); Map nodes=rm.getRMContext().getRMNodes(); while (nodes.get(nm2.getNodeId()).getLastNodeHeartBeatResponse().getResponseId() > 0) { Thread.sleep(WAIT_SLEEP_MS); } int interval=40; while (nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()) && interval-- > 0) { LOG.info("waiting for nmToken to be cleared for : " + nm2.getNodeId()); Thread.sleep(WAIT_SLEEP_MS); } Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); nmTokens.remove(nm2.getNodeId().toString()); Assert.assertEquals(1,nmTokens.size()); response=am.allocate("h2",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,4,nmTokens,nm2); Assert.assertEquals(2,nmTokens.size()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); nmTokenSecretManager.rollMasterKey(); nmTokenSecretManager.activateNextMasterKey(); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); nmTokens.clear(); Assert.assertEquals(0,nmTokens.size()); response=am.allocate("h2",1000,1,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,5,nmTokens,nm2); Assert.assertEquals(1,nmTokens.size()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); am.unregisterAppAttempt(); for ( Container container : containersReceivedForNM1) { nm1.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE); } for ( Container container : containersReceivedForNM2) { nm2.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE); } nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); } finally { rm.stop(); } }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With the default local configuration provider, refreshQueues must succeed
 * and must not change the CapacityScheduler's maximum-system-applications
 * value (nothing new to pick up).
 */
@Test public void testAdminRefreshQueuesWithLocalConfigurationProvider()
    throws IOException, YarnException {
  rm = new MockRM(configuration);
  rm.init(configuration);
  rm.start();
  CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler();
  int maxAppsBefore = cs.getConfiguration().getMaximumSystemApplications();
  try {
    rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
    // Value must be unchanged after a no-op refresh.
    Assert.assertEquals(maxAppsBefore,
        cs.getConfiguration().getMaximumSystemApplications());
  } catch (Exception ex) {
    fail("Using localConfigurationProvider. Should not get any exception.");
  }
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Boots a full ResourceManager with FileSystemBasedConfigurationProvider and
 * verifies that every refreshable setting is read from the remote-fs copies
 * uploaded beforehand: node exclude list ("0.0.0.0:123"), admin ACL
 * ("world:anyone:rwcda"), capacity-scheduler max system apps (5000),
 * hadoop-policy service ACLs applied to all four RPC servers (admin, client,
 * AM, resource-tracker), proxyuser groups/hosts, and the mocked
 * user-to-groups mapping (test_group_D/E/F). The RM is stopped in a finally
 * block regardless of assertion outcome.
 */
@Test public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); final File excludeHostsFile=new File(tmpDir.toString(),"excludeHosts"); if (excludeHostsFile.exists()) { excludeHostsFile.delete(); } if (!excludeHostsFile.createNewFile()) { Assert.fail("Can not create " + "excludeHosts"); } PrintWriter fileWriter=new PrintWriter(excludeHostsFile); fileWriter.write("0.0.0.0:123"); fileWriter.close(); uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath())); YarnConfiguration yarnConf=new YarnConfiguration(); yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL,"world:anyone:rwcda"); yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,this.workingPath + "/excludeHosts"); uploadConfiguration(yarnConf,"yarn-site.xml"); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); csConf.set("yarn.scheduler.capacity.maximum-applications","5000"); uploadConfiguration(csConf,"capacity-scheduler.xml"); String aclsString="alice,bob users,wheel"; Configuration newConf=new Configuration(); newConf.set("security.applicationclient.protocol.acl",aclsString); uploadConfiguration(newConf,"hadoop-policy.xml"); Configuration conf=new Configuration(); conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,true); conf.set("hadoop.proxyuser.test.groups","test_groups"); conf.set("hadoop.proxyuser.test.hosts","test_hosts"); conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MockUnixGroupsMapping.class,GroupMappingServiceProvider.class); uploadConfiguration(conf,"core-site.xml"); MockUnixGroupsMapping.updateGroups(); ResourceManager resourceManager=null; try { try { resourceManager=new ResourceManager(); resourceManager.init(configuration); resourceManager.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } Set excludeHosts=resourceManager.getRMContext().getNodesListManager().getHostsReader().getExcludedHosts(); Assert.assertTrue(excludeHosts.size() == 1); Assert.assertTrue(excludeHosts.contains("0.0.0.0:123")); String aclStringAfter=resourceManager.adminService.getAccessControlList().getAclString().trim(); Assert.assertEquals(aclStringAfter,"world:anyone:rwcda"); CapacityScheduler cs=(CapacityScheduler)resourceManager.getRMContext().getScheduler(); int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications(); Assert.assertEquals(maxAppsAfter,5000); ServiceAuthorizationManager adminServiceServiceManager=resourceManager.adminService.getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(adminServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager clientRMServiceServiceManager=resourceManager.getRMContext().getClientRMService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(clientRMServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager appMasterService=resourceManager.getRMContext().getApplicationMasterService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(appMasterService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager RTService=resourceManager.getRMContext().getResourceTrackerService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(RTService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups")); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts")); List groupAfter=Groups.getUserToGroupsMappingService(configuration).getGroups(UserGroupInformation.getCurrentUser().getUserName()); Assert.assertTrue(groupAfter.contains("test_group_D") && groupAfter.contains("test_group_E") && groupAfter.contains("test_group_F")&& groupAfter.size() == 3); } finally { if (resourceManager != null) { resourceManager.stop(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two-RM HA setup using FileSystemBasedConfigurationProvider: both RMs start
 * in STANDBY; rm1 is transitioned to ACTIVE and refreshQueues picks up an
 * uploaded capacity-scheduler.xml (max system apps 5000) while standby rm2
 * still sees the default (10000); after failover, rm2 becomes ACTIVE and
 * loads the uploaded value as well. Both RMs are stopped in a finally block.
 */
@Test public void testRMHAWithFileSystemBasedConfiguration() throws IOException, YarnException {
  StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
      HAServiceProtocol.RequestSource.REQUEST_BY_USER);
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  configuration.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
  // Give each RM id distinct, non-conflicting service addresses.
  int base = 100;
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, "rm1"), "0.0.0.0:" + (base + 20));
    configuration.set(HAUtil.addSuffix(confKey, "rm2"), "0.0.0.0:" + (base + 40));
    base = base * 2;
  }
  Configuration conf1 = new Configuration(configuration);
  conf1.set(YarnConfiguration.RM_HA_ID, "rm1");
  Configuration conf2 = new Configuration(configuration);
  conf2.set(YarnConfiguration.RM_HA_ID, "rm2");
  uploadDefaultConfiguration();
  MockRM rm1 = null;
  MockRM rm2 = null;
  try {
    rm1 = new MockRM(conf1);
    rm1.init(conf1);
    rm1.start();
    Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
    rm2 = new MockRM(conf2);
    // BUGFIX: was rm2.init(conf1), which initialized the second RM with rm1's
    // HA id; rm2 must be initialized with its own configuration (conf2).
    rm2.init(conf2);
    rm2.start();
    Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
    rm1.adminService.transitionToActive(requestInfo);
    Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
    uploadConfiguration(csConf, "capacity-scheduler.xml");
    rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
    // Active rm1 must have loaded the uploaded value.
    int maxApps = ((CapacityScheduler) rm1.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(maxApps, 5000);
    // Standby rm2 still holds the default before failover.
    int maxAppsBeforeFailOver = ((CapacityScheduler) rm2.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(maxAppsBeforeFailOver, 10000);
    rm1.adminService.transitionToStandby(requestInfo);
    rm2.adminService.transitionToActive(requestInfo);
    Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
    Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);
    // After failover the new active RM must pick up the uploaded config.
    int maxAppsAfter = ((CapacityScheduler) rm2.getRMContext().getScheduler())
        .getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(maxAppsAfter, 5000);
  } finally {
    if (rm1 != null) {
      rm1.stop();
    }
    if (rm2 != null) {
      rm2.stop();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * With FileSystemBasedConfigurationProvider, refreshAdminAcls must pick up an
 * uploaded yarn-site.xml: the admin ACL changes from its initial value to
 * "world:anyone:rwcda".
 */
@Test public void testAdminAclsWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }
  String aclStringBefore =
      rm.adminService.getAccessControlList().getAclString().trim();
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
  uploadConfiguration(yarnConf, "yarn-site.xml");
  rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());
  String aclStringAfter =
      rm.adminService.getAccessControlList().getAclString().trim();
  // The ACL must actually change and match the uploaded value.
  Assert.assertTrue(!aclStringAfter.equals(aclStringBefore));
  Assert.assertEquals(aclStringAfter, "world:anyone:rwcda");
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); String[] defaultTestUserGroups={"dummy_group1","dummy_group2"}; UserGroupInformation ugi=UserGroupInformation.createUserForTesting("dummyUser",defaultTestUserGroups); String user=ugi.getUserName(); List groupWithInit=new ArrayList(2); for (int i=0; i < ugi.getGroupNames().length; i++) { groupWithInit.add(ugi.getGroupNames()[i]); } uploadDefaultConfiguration(); Configuration conf=new Configuration(); conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MockUnixGroupsMapping.class,GroupMappingServiceProvider.class); uploadConfiguration(conf,"core-site.xml"); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } List groupBefore=new ArrayList(Groups.getUserToGroupsMappingService(configuration).getGroups(user)); Assert.assertTrue(groupBefore.contains("test_group_A") && groupBefore.contains("test_group_B") && groupBefore.contains("test_group_C")&& groupBefore.size() == 3); Assert.assertTrue(groupWithInit.size() != groupBefore.size()); Assert.assertFalse(groupWithInit.contains("test_group_A") || groupWithInit.contains("test_group_B") || groupWithInit.contains("test_group_C")); MockUnixGroupsMapping.updateGroups(); rm.adminService.refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequest.newInstance()); List groupAfter=Groups.getUserToGroupsMappingService(configuration).getGroups(user); Assert.assertTrue(groupAfter.contains("test_group_D") && groupAfter.contains("test_group_E") && groupAfter.contains("test_group_F")&& groupAfter.size() == 3); }

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testRefreshNodesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); uploadDefaultConfiguration(); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } final File excludeHostsFile=new File(tmpDir.toString(),"excludeHosts"); if (excludeHostsFile.exists()) { excludeHostsFile.delete(); } if (!excludeHostsFile.createNewFile()) { Assert.fail("Can not create " + "excludeHosts"); } PrintWriter fileWriter=new PrintWriter(excludeHostsFile); fileWriter.write("0.0.0.0:123"); fileWriter.close(); uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath())); Configuration yarnConf=new YarnConfiguration(); yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,this.workingPath + "/excludeHosts"); uploadConfiguration(yarnConf,YarnConfiguration.YARN_SITE_CONFIGURATION_FILE); rm.adminService.refreshNodes(RefreshNodesRequest.newInstance()); Set excludeHosts=rm.getNodesListManager().getHostsReader().getExcludedHosts(); Assert.assertTrue(excludeHosts.size() == 1); Assert.assertTrue(excludeHosts.contains("0.0.0.0:123")); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testAdminRefreshQueuesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); uploadDefaultConfiguration(); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } CapacityScheduler cs=(CapacityScheduler)rm.getRMContext().getScheduler(); int maxAppsBefore=cs.getConfiguration().getMaximumSystemApplications(); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); csConf.set("yarn.scheduler.capacity.maximum-applications","5000"); uploadConfiguration(csConf,"capacity-scheduler.xml"); rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance()); int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications(); Assert.assertEquals(maxAppsAfter,5000); Assert.assertTrue(maxAppsAfter != maxAppsBefore); }

UtilityVerifier BooleanVerifier HybridVerifier 
@Test public void testRefreshSuperUserGroupsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); uploadDefaultConfiguration(); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } Configuration coreConf=new Configuration(false); coreConf.set("hadoop.proxyuser.test.groups","test_groups"); coreConf.set("hadoop.proxyuser.test.hosts","test_hosts"); uploadConfiguration(coreConf,"core-site.xml"); rm.adminService.refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequest.newInstance()); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups")); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts")); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMHA

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that repeated active/standby transitions neither leak nor drop
 * dispatcher event handlers or RM services. A counting dispatcher records
 * the handler count at init; after several transition cycles the handler
 * count and service count must equal the initial values, and the dispatcher
 * instance that was live while active must be stopped once the RM goes
 * standby again.
 */
@Test public void testRMDispatcherForHA() throws IOException { String errorMessageForEventHandler="Expect to get the same number of handlers"; String errorMessageForService="Expect to get the same number of services"; configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf){ @Override protected Dispatcher createDispatcher(){ return new MyCountingDispatcher(); } } ; rm.init(conf); int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount(); int expectedServiceCount=rm.getServices().size(); assertTrue(expectedEventHandlerCount != 0); StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER); assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive()); rm.start(); rm.adminService.transitionToStandby(requestInfo); rm.adminService.transitionToActive(requestInfo); rm.adminService.transitionToStandby(requestInfo); rm.adminService.transitionToActive(requestInfo); rm.adminService.transitionToStandby(requestInfo); MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher(); assertTrue(!dispatcher.isStopped()); rm.adminService.transitionToActive(requestInfo); assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount()); assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size()); dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher(); rm.adminService.transitionToStandby(requestInfo); assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount()); assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size()); assertTrue(dispatcher.isStopped()); rm.stop(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test to verify the following RM HA transitions to the following states:
 * 1. Standby: should be a no-op.
 * 2. Active: active services should start.
 * 3. Active: should be a no-op (while active, submit a couple of jobs).
 * 4. Standby: active services should stop.
 * 5. Active: active services should start again.
 * 6. Stop the RM: all services should stop and the RM should not be ready
 *    to become active.
 * Monitor health and cluster metrics are checked after every transition;
 * metrics reset to zero while standby and reflect the registered node and
 * submitted apps while active.
 */
@Test(timeout=30000) public void testFailoverAndTransitions() throws Exception { configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER); assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive()); checkMonitorHealth(); rm.start(); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToStandby(requestInfo); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,1,1,1,2048,1); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,2,2,2,2048,2); rm.adminService.transitionToStandby(requestInfo); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,1,1,1,2048,1); rm.stop(); assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active even after it is stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive()); assertFalse("Active RM services are started",rm.areActiveServicesRunning()); checkMonitorHealth(); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testHAIDLookup(){ Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); assertEquals(conf.get(YarnConfiguration.RM_HA_ID),RM2_NODE_ID); configuration.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID); conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); assertEquals(conf.get(YarnConfiguration.RM_HA_ID),RM1_NODE_ID); configuration.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM3_NODE_ID); configuration.unset(YarnConfiguration.RM_HA_ID); conf=new YarnConfiguration(configuration); try { rm=new MockRM(conf); rm.init(conf); fail("Should get an exception here."); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("Invalid configuration! Can not find valid RM_HA_ID.")); } }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMNodeTransitions

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testAdd(){ RMNodeImpl node=getNewNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeStartedEvent(node.getNodeID(),null,null)); Assert.assertEquals("Active Nodes",initialActive + 1,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.RUNNING,node.getState()); Assert.assertNotNull(nodesListManagerEvent); Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testReconnect(){ RMNodeImpl node=getRunningNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeReconnectEvent(node.getNodeID(),node,null)); Assert.assertEquals("Active Nodes",initialActive,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.RUNNING,node.getState()); Assert.assertNotNull(nodesListManagerEvent); Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testRMRestartAppRunningAMFailed() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE); am0.waitForState(RMAppAttemptState.FAILED); ApplicationState appState=rmAppState.get(app0.getApplicationId()); Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState()); Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState()); rm1.waitForState(app0.getApplicationId(),RMAppState.ACCEPTED); MockRM rm2=new MockRM(conf,memStore); rm2.start(); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED); rm1.stop(); rm2.stop(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end RM restart scenario: a finished app, a running app with a
 * launched AM and allocated containers, a freshly submitted app, and an
 * unmanaged-AM app are all persisted by rm1; rm2 recovers all four, tells
 * the old AM to shut down and the old NMs to resync, re-registers the NMs,
 * relaunches AMs for the two recoverable apps, and finishes them. Finally
 * the state store still holds all four application records.
 * NOTE(review): the statement ordering (heartbeats, waitForState calls,
 * polling loops) is load-bearing for recovery timing; code left
 * byte-identical.
 */
@SuppressWarnings("rawtypes") @Test(timeout=180000) public void testRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); MockNM nm2=new MockNM("127.0.0.2:5678",15120,rm1.getResourceTrackerService()); nm1.registerNode(); nm2.registerNode(); RMApp app0=rm1.submitApp(200); RMAppAttempt attempt0=app0.getCurrentAppAttempt(); Assert.assertEquals(1,rmAppState.size()); nm1.nodeHeartbeat(true); MockAM am0=rm1.sendAMLaunched(attempt0.getAppAttemptId()); am0.registerAppAttempt(); finishApplicationMaster(app0,rm1,nm1,am0); RMApp app1=rm1.submitApp(200); ApplicationState appState=rmAppState.get(app1.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId()); nm1.nodeHeartbeat(true); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); ApplicationAttemptId attemptId1=attempt1.getAppAttemptId(); rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); Assert.assertEquals(1,appState.getAttemptCount()); ApplicationAttemptState attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); MockAM am1=rm1.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); am1.allocate("127.0.0.1",1000,1,new ArrayList()); nm1.nodeHeartbeat(true); List conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (conts.size() == 0) { nm1.nodeHeartbeat(true); conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(500); } RMApp app2=rm1.submitApp(200); appState=rmAppState.get(app2.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app2.getApplicationSubmissionContext().getApplicationId()); RMApp appUnmanaged=rm1.submitApp(200,"someApp","someUser",null,true,null,conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null); ApplicationAttemptId unmanagedAttemptId=appUnmanaged.getCurrentAppAttempt().getAppAttemptId(); ApplicationId unmanagedAppId=appUnmanaged.getApplicationId(); appState=rmAppState.get(unmanagedAppId); Assert.assertNotNull(appState); rm1.waitForState(unmanagedAttemptId,RMAppAttemptState.LAUNCHED); rm1.waitForState(unmanagedAppId,RMAppState.ACCEPTED); Assert.assertEquals(1,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),appUnmanaged.getApplicationSubmissionContext().getApplicationId()); MockRM rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); nm2.setResourceTrackerService(rm2.getResourceTrackerService()); Assert.assertEquals(4,rm2.getRMContext().getRMApps().size()); rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FINISHED); RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); Assert.assertNotNull(loadedApp1); Assert.assertEquals(1,loadedApp1.getAppAttempts().size()); Assert.assertEquals(app1.getApplicationSubmissionContext().getApplicationId(),loadedApp1.getApplicationSubmissionContext().getApplicationId()); RMApp loadedApp2=rm2.getRMContext().getRMApps().get(app2.getApplicationId()); Assert.assertNotNull(loadedApp2); Assert.assertEquals(app2.getApplicationSubmissionContext().getApplicationId(),loadedApp2.getApplicationSubmissionContext().getApplicationId()); rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED); rm2.waitForState(loadedApp2.getApplicationId(),RMAppState.ACCEPTED); Assert.assertEquals(1,loadedApp1.getAppAttempts().size()); Assert.assertEquals(1,loadedApp2.getAppAttempts().size()); am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext()); AllocateResponse allocResponse=am1.allocate(new ArrayList(),new ArrayList()); Assert.assertEquals(AMCommand.AM_SHUTDOWN,allocResponse.getAMCommand()); NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); hbResponse=nm2.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); nm1=new MockNM("127.0.0.1:1234",15120,rm2.getResourceTrackerService()); nm2=new MockNM("127.0.0.2:5678",15120,rm2.getResourceTrackerService()); NMContainerStatus status=TestRMRestart.createNMContainerStatus(loadedApp1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(status),null); nm2.registerNode(); rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED); int timeoutSecs=0; while (loadedApp1.getAppAttempts().size() != 2 && timeoutSecs++ < 40) { ; Thread.sleep(200); } hbResponse=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction()); hbResponse=nm2.nodeHeartbeat(true); Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction()); attempt1=loadedApp1.getCurrentAppAttempt(); attemptId1=attempt1.getAppAttemptId(); rm2.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); appState=rmAppState.get(loadedApp1.getApplicationId()); attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); MockNM am1Node=nm1; if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) { am1Node=nm2; } RMAppAttempt attempt2=loadedApp2.getCurrentAppAttempt(); ApplicationAttemptId attemptId2=attempt2.getAppAttemptId(); rm2.waitForState(attemptId2,RMAppAttemptState.ALLOCATED); appState=rmAppState.get(loadedApp2.getApplicationId()); attemptState=appState.getAttempt(attemptId2); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId2,1),attemptState.getMasterContainer().getId()); MockNM am2Node=nm1; if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) { am2Node=nm2; } am1=rm2.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); MockAM am2=rm2.sendAMLaunched(attempt2.getAppAttemptId()); am2.registerAppAttempt(); am1.allocate("127.0.0.1",1000,3,new ArrayList()); am2.allocate("127.0.0.2",1000,1,new ArrayList()); nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(true); conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (conts.size() == 0) { nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(true); conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(500); } finishApplicationMaster(loadedApp1,rm2,am1Node,am1); finishApplicationMaster(loadedApp2,rm2,am2Node,am2); rm2.stop(); rm1.stop(); Assert.assertEquals(4,rmAppState.size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testRMRestartFailedApp() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE); am0.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app0.getApplicationId(),RMAppState.FAILED); ApplicationState appState=rmAppState.get(app0.getApplicationId()); Assert.assertEquals(RMAppState.FAILED,appState.getState()); Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState()); MockRM rm2=new MockRM(conf,memStore); rm2.start(); RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId()); rm2.waitForState(app0.getApplicationId(),RMAppState.FAILED); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED); Assert.assertEquals(1,loadedApp0.getAppAttempts().size()); verifyAppReportAfterRMRestart(app0,rm2); Assert.assertTrue(app0.getDiagnostics().toString().contains("Failing the application.")); rm1.stop(); rm2.stop(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testRMStateStoreDispatcherDrainedOnRMStop() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(){ volatile boolean wait=true; @Override public void serviceStop() throws Exception { wait=false; super.serviceStop(); } @Override protected void handleStoreEvent( RMStateStoreEvent event){ while (wait) ; super.handleStoreEvent(event); } } ; memStore.init(conf); final MockRM rm1=new MockRM(conf,memStore); rm1.start(); final ArrayList appList=new ArrayList(); final int NUM_APPS=5; for (int i=0; i < NUM_APPS; i++) { RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false); appList.add(app); rm1.waitForState(app.getApplicationId(),RMAppState.NEW_SAVING); } Map rmAppState=memStore.getState().getApplicationState(); Assert.assertTrue(rmAppState.size() == 0); rm1.stop(); for ( RMApp app : appList) { ApplicationState appState=rmAppState.get(app.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app.getApplicationSubmissionContext().getApplicationId()); } Assert.assertTrue(rmAppState.size() == NUM_APPS); }

TestInitializer BooleanVerifier HybridVerifier 
@Before public void setup() throws UnknownHostException { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); conf=new YarnConfiguration(); UserGroupInformation.setConfiguration(conf); conf.set(YarnConfiguration.RECOVERY_ENABLED,"true"); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); rmAddr=new InetSocketAddress("localhost",8032); Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Two RM delegation tokens attached to a submitted app's Credentials must be
 * persisted with the app state (byte-identical token buffer in the stored
 * AMContainerSpec) and, after an RM restart, re-registered in the new RM's
 * DelegationTokenRenewer.
 */
@Test(timeout=60000) public void testDelegationTokenRestoredInDelegationTokenRenewer() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new TestSecurityMockRM(conf,memStore); rm1.start(); HashSet> tokenSet=new HashSet>(); Credentials ts=new Credentials(); Text userText1=new Text("user1"); RMDelegationTokenIdentifier dtId1=new RMDelegationTokenIdentifier(userText1,new Text("renewer1"),userText1); Token token1=new Token(dtId1,rm1.getRMContext().getRMDelegationTokenSecretManager()); SecurityUtil.setTokenService(token1,rmAddr); ts.addToken(userText1,token1); tokenSet.add(token1); Text userText2=new Text("user2"); RMDelegationTokenIdentifier dtId2=new RMDelegationTokenIdentifier(userText2,new Text("renewer2"),userText2); Token token2=new Token(dtId2,rm1.getRMContext().getRMDelegationTokenSecretManager()); SecurityUtil.setTokenService(token2,rmAddr); ts.addToken(userText2,token2); tokenSet.add(token2); RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts); ApplicationState appState=rmAppState.get(app.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(tokenSet,rm1.getRMContext().getDelegationTokenRenewer().getDelegationTokens()); DataOutputBuffer dob=new DataOutputBuffer(); ts.writeTokenStorageToStream(dob); ByteBuffer securityTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength()); securityTokens.rewind(); Assert.assertEquals(securityTokens,appState.getApplicationSubmissionContext().getAMContainerSpec().getTokens()); MockRM rm2=new TestSecurityMockRM(conf,memStore); rm2.start(); waitForTokensToBeRenewed(rm2); Assert.assertEquals(tokenSet,rm2.getRMContext().getDelegationTokenRenewer().getDelegationTokens()); rm1.stop(); rm2.stop(); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testFinishedAppRemovalAfterRMRestart() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(); conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,1); memStore.init(conf); RMState rmState=memStore.getState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); finishApplicationMaster(app0,rm1,nm1,am0); MockRM rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); nm1=rm2.registerNode("127.0.0.1:1234",15120); Map rmAppState=rmState.getApplicationState(); Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState()); rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED); RMApp app1=rm2.submitApp(200); MockAM am1=launchAM(app1,rm2,nm1); finishApplicationMaster(app1,rm2,nm1,am1); Assert.assertNull(rm2.getRMContext().getRMApps().get(app0.getApplicationId())); Assert.assertNull(rmAppState.get(app0.getApplicationId())); rm1.stop(); rm2.stop(); }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After an RM restart, getApplications must return the recovered FINISHED,
 * FAILED, and KILLED apps with the correct YarnApplicationState, filter
 * correctly by application type ("myType"), and log an application summary
 * once per recovered app (verified via a spied RMAppManager).
 */
@Test(timeout=60000) public void testRMRestartGetApplicationList() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType"); MockAM am0=launchAM(app0,rm1,nm1); finishApplicationMaster(app0,rm1,nm1,am0); RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType"); MockAM am1=launchAM(app1,rm1,nm1); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE); am1.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED); RMApp app2=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType"); MockAM am2=launchAM(app2,rm1,nm1); rm1.killApp(app2.getApplicationId()); rm1.waitForState(app2.getApplicationId(),RMAppState.KILLED); rm1.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.KILLED); MockRM rm2=new MockRM(conf,memStore){ @Override protected RMAppManager createRMAppManager(){ return spy(super.createRMAppManager()); } } ; rm2.start(); GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED)); GetApplicationsResponse response1=rm2.getClientRMService().getApplications(request1); List appList1=response1.getApplicationList(); boolean forApp0=false, forApp1=false, forApp2=false; for ( ApplicationReport report : appList1) { if (report.getApplicationId().equals(app0.getApplicationId())) { Assert.assertEquals(YarnApplicationState.FINISHED,report.getYarnApplicationState()); forApp0=true; } if (report.getApplicationId().equals(app1.getApplicationId())) { Assert.assertEquals(YarnApplicationState.FAILED,report.getYarnApplicationState()); forApp1=true; } if (report.getApplicationId().equals(app2.getApplicationId())) { Assert.assertEquals(YarnApplicationState.KILLED,report.getYarnApplicationState()); forApp2=true; } } Assert.assertTrue(forApp0 && forApp1 && forApp2); Set appTypes=new HashSet(); appTypes.add("myType"); GetApplicationsRequest request2=GetApplicationsRequest.newInstance(appTypes); GetApplicationsResponse response2=rm2.getClientRMService().getApplications(request2); List appList2=response2.getApplicationList(); Assert.assertTrue(3 == appList2.size()); verify(rm2.getRMAppManager(),times(3)).logApplicationSummary(isA(ApplicationId.class)); rm1.stop(); rm2.stop(); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * RM-restart behavior around per-app max attempts: app1 is submitted with
 * maxAppAttempts=1, app2 with -1 (meaning "use the RM default"). Before
 * restart the state store must hold app1's submission context and, once
 * the first attempt is ALLOCATED, its master container id. After restart
 * (rm2), app2's effective max attempts is 2, app1 (out of attempts) goes
 * FAILED and is persisted as FAILED, while app2 is re-accepted and has no
 * persisted final state.
 * NOTE(review): raw Map/HashMap types kept as-is from the original.
 */
@Test(timeout=60000) public void testRMRestartOnMaxAppAttempts() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,null); RMApp app2=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null); ApplicationState appState=rmAppState.get(app1.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId()); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app1.getCurrentAppAttempt(); ApplicationAttemptId attemptId1=attempt.getAppAttemptId(); rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); Assert.assertEquals(1,appState.getAttemptCount()); ApplicationAttemptState attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,3000); MockRM rm2=new MockRM(conf,memStore); rm2.start(); Assert.assertEquals(2,rm2.getRMContext().getRMApps().get(app2.getApplicationId()).getMaxAppAttempts()); Assert.assertEquals(2,rm2.getRMContext().getRMApps().size()); rm2.waitForState(app1.getApplicationId(),RMAppState.FAILED); rm2.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED); Assert.assertEquals(RMAppState.FAILED,rmAppState.get(app1.getApplicationId()).getState()); Assert.assertNull(rmAppState.get(app2.getApplicationId()).getState()); rm1.stop(); rm2.stop(); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Security-mode (kerberos) restart test: an attempt's client-token master
 * key must be saved in the attempt credentials under
 * RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME, and after restart the
 * recovered attempt must expose the same client-token master key (also
 * registered with the new ClientToAMTokenSecretManager) and an AMRM token
 * whose password the new AMRMTokenSecretManager can reproduce.
 * NOTE(review): raw Map type and raw Token declaration kept as-is.
 */
@Test(timeout=60000) public void testAppAttemptTokensRestoredOnRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new TestSecurityMockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("0.0.0.0:4321",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),"default"); ApplicationState appState=rmAppState.get(app1.getApplicationId()); Assert.assertNotNull(appState); nm1.nodeHeartbeat(true); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); ApplicationAttemptId attemptId1=attempt1.getAppAttemptId(); rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); ApplicationAttemptState attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); byte[] clientTokenMasterKey=attempt1.getClientTokenMasterKey().getEncoded(); Credentials savedCredentials=attemptState.getAppAttemptCredentials(); Assert.assertArrayEquals("client token master key not saved",clientTokenMasterKey,savedCredentials.getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME)); MockRM rm2=new TestSecurityMockRM(conf,memStore); rm2.start(); RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); RMAppAttempt loadedAttempt1=loadedApp1.getRMAppAttempt(attemptId1); Assert.assertNotNull(loadedAttempt1); Assert.assertEquals("client token master key not restored",attempt1.getClientTokenMasterKey(),loadedAttempt1.getClientTokenMasterKey()); Assert.assertArrayEquals(clientTokenMasterKey,rm2.getClientToAMTokenSecretManager().getMasterKey(attemptId1).getEncoded()); Token 
amrmToken=loadedAttempt1.getAMRMToken(); Assert.assertArrayEquals(amrmToken.getPassword(),rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword(amrmToken.decodeIdentifier())); rm1.stop(); rm2.stop(); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies that the RM fails fast when the state store reports an
 * incompatible version: {@code MockRM.start()} must propagate the
 * "Invalid version." failure and leave the service in STOPPED state.
 */
@Test(timeout = 10000)
public void testRMShutdown() throws Exception {
  // State store whose version check always fails.
  MemoryRMStateStore memStore = new MemoryRMStateStore() {
    @Override
    public synchronized void checkVersion() throws Exception {
      throw new Exception("Invalid version.");
    }
  };
  memStore.init(conf);
  MockRM rm1 = null;
  try {
    rm1 = new MockRM(conf, memStore);
    rm1.start();
    Assert.fail("RM start should have failed on invalid store version");
  } catch (Exception e) {
    Assert.assertTrue(e.getMessage().contains("Invalid version."));
  }
  // Guard before dereferencing: if the MockRM constructor itself threw,
  // rm1 is still null and the original state check raised an NPE instead
  // of a meaningful assertion failure.
  Assert.assertNotNull("MockRM was never constructed", rm1);
  Assert.assertEquals(STATE.STOPPED, rm1.getServiceState());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Client kill-retry test: killApp is retried every 100ms until the
 * response reports the kill as completed, then the attempt and app must
 * reach KILLED, and the instrumented store must have seen exactly 1
 * attempt update and 2 app updates.
 * NOTE(review): the count >= 1 assertion presumes the first kill call
 * never completes immediately — presumably guaranteed by
 * TestMemoryRMStateStore (defined elsewhere in this file); confirm if
 * that class changes.
 */
@Test(timeout=60000) public void testClientRetryOnKillingApplication() throws Exception { MemoryRMStateStore memStore=new TestMemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType"); MockAM am1=launchAM(app1,rm1,nm1); KillApplicationResponse response; int count=0; while (true) { response=rm1.killApp(app1.getApplicationId()); if (response.getIsKillCompleted()) { break; } Thread.sleep(100); count++; } Assert.assertTrue(count >= 1); rm1.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.KILLED); rm1.waitForState(app1.getApplicationId(),RMAppState.KILLED); Assert.assertEquals(1,((TestMemoryRMStateStore)memStore).updateAttempt); Assert.assertEquals(2,((TestMemoryRMStateStore)memStore).updateApp); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Restart while an app is FINISHING: the store override drops the first
 * updateApplicationStateInternal call, so the app's final state is NOT
 * persisted before restart (asserted via the null state in the store).
 * The restarted RM must still drive the previously-succeeded attempt to
 * FINISHED and then persist the application as FINISHED.
 */
@Test(timeout=60000) public void testRMRestartWaitForPreviousSucceededAttempt() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); MemoryRMStateStore memStore=new MemoryRMStateStore(){ int count=0; @Override public void updateApplicationStateInternal( ApplicationId appId, ApplicationStateData appStateData) throws Exception { if (count == 0) { LOG.info(appId + " final state is not saved."); count++; } else { super.updateApplicationStateInternal(appId,appStateData); } } } ; memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=rm1.registerNode("127.0.0.1:1234",15120); RMApp app0=rm1.submitApp(200); MockAM am0=MockRM.launchAndRegisterAM(app0,rm1,nm1); FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED,"",""); am0.unregisterAppAttempt(req,true); am0.waitForState(RMAppAttemptState.FINISHING); Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState()); MockRM rm2=new MockRM(conf,memStore); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); rm2.start(); rm2.waitForState(app0.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.FINISHED); rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED); Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * RM delegation-token restart test: tokens and master keys issued by rm1
 * must be written through to the MemoryRMStateStore (token set, master
 * keys and sequence number all match); cancelling token2 removes it from
 * the store before restart. After restart, rm2 must recover the same
 * tokens, contain all of rm1's master keys, and report the same sequence
 * number; renewing token1 on rm2 updates the stored renew date, and
 * cancelling it removes it from both rm2 and the store.
 * NOTE(review): Thread.sleep(1) only guarantees a strictly larger renew
 * timestamp; raw Map/Set/Token types kept as-is from the original.
 */
@Test(timeout=60000) public void testRMDelegationTokenRestoredOnRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); conf.set(YarnConfiguration.RM_ADDRESS,"localhost:8032"); UserGroupInformation.setConfiguration(conf); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); Map rmDTState=rmState.getRMDTSecretManagerState().getTokenState(); Set rmDTMasterKeyState=rmState.getRMDTSecretManagerState().getMasterKeyState(); MockRM rm1=new TestSecurityMockRM(conf,memStore); rm1.start(); Credentials ts=new Credentials(); GetDelegationTokenRequest request1=GetDelegationTokenRequest.newInstance("renewer1"); UserGroupInformation.getCurrentUser().setAuthenticationMethod(AuthMethod.KERBEROS); GetDelegationTokenResponse response1=rm1.getClientRMService().getDelegationToken(request1); org.apache.hadoop.yarn.api.records.Token delegationToken1=response1.getRMDelegationToken(); Token token1=ConverterUtils.convertFromYarn(delegationToken1,rmAddr); RMDelegationTokenIdentifier dtId1=token1.decodeIdentifier(); HashSet tokenIdentSet=new HashSet(); ts.addToken(token1.getService(),token1); tokenIdentSet.add(dtId1); RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts); ApplicationState appState=rmAppState.get(app.getApplicationId()); Assert.assertNotNull(appState); Set allKeysRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys(); Assert.assertEquals(allKeysRM1,rmDTMasterKeyState); Map allTokensRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllTokens(); Assert.assertEquals(tokenIdentSet,allTokensRM1.keySet()); Assert.assertEquals(allTokensRM1,rmDTState); Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rmState.getRMDTSecretManagerState().getDTSequenceNumber()); 
GetDelegationTokenRequest request2=GetDelegationTokenRequest.newInstance("renewer2"); GetDelegationTokenResponse response2=rm1.getClientRMService().getDelegationToken(request2); org.apache.hadoop.yarn.api.records.Token delegationToken2=response2.getRMDelegationToken(); Token token2=ConverterUtils.convertFromYarn(delegationToken2,rmAddr); RMDelegationTokenIdentifier dtId2=token2.decodeIdentifier(); try { rm1.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token2,UserGroupInformation.getCurrentUser().getUserName()); } catch ( Exception e) { Assert.fail(); } Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),dtId2.getSequenceNumber()); Assert.assertFalse(rmDTState.containsKey(dtId2)); MockRM rm2=new TestSecurityMockRM(conf,memStore); rm2.start(); Map allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens(); Assert.assertEquals(allTokensRM2.keySet(),allTokensRM1.keySet()); Assert.assertTrue(rm2.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys().containsAll(allKeysRM1)); Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rm2.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber()); Long renewDateBeforeRenew=allTokensRM2.get(dtId1); try { Thread.sleep(1); rm2.getRMContext().getRMDelegationTokenSecretManager().renewToken(token1,"renewer1"); } catch ( Exception e) { Assert.fail(); } allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens(); Long renewDateAfterRenew=allTokensRM2.get(dtId1); Assert.assertTrue(renewDateAfterRenew > renewDateBeforeRenew); Assert.assertTrue(rmDTState.containsValue(renewDateAfterRenew)); Assert.assertFalse(rmDTState.containsValue(renewDateBeforeRenew)); try { rm2.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token1,UserGroupInformation.getCurrentUser().getUserName()); } catch ( Exception e) { Assert.fail(); } 
allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens(); Assert.assertFalse(allTokensRM2.containsKey(dtId1)); Assert.assertFalse(rmDTState.containsKey(dtId1)); rm1.stop(); rm2.stop(); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestResourceTrackerService

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testReboot() throws Exception { Configuration conf=new Configuration(); rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:1234",2048); int initialMetricCount=ClusterMetrics.getMetrics().getNumRebootedNMs(); NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm2.nodeHeartbeat(new HashMap>(),true,-100); Assert.assertTrue(NodeAction.RESYNC.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals("Too far behind rm response id:0 nm response id:-100",nodeHeartbeat.getDiagnosticsMessage()); checkRebootedNMCount(rm,++initialMetricCount); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Decommission via the include-hosts file: three NMs (host1, host2,
 * localhost) register and heartbeat NORMAL; the include list is then
 * rewritten to keep only host1 and the resolved "localhost" IP and
 * refreshNodes is invoked. host2's next heartbeat must get SHUTDOWN and
 * bump the decommissioned-NM metric exactly once, while host1 and
 * localhost continue to heartbeat NORMAL.
 */
/** * Decommissioning using a pre-configured include hosts file */ @Test public void testDecommissionWithIncludeHosts() throws Exception { writeToHostsFile("localhost","host1","host2"); Configuration conf=new Configuration(); conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath()); rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:5678",10240); MockNM nm3=rm.registerNode("localhost:4433",1024); ClusterMetrics metrics=ClusterMetrics.getMetrics(); assert (metrics != null); int metricCount=metrics.getNumDecommisionedNMs(); NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm3.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); String ip=NetUtils.normalizeHostName("localhost"); writeToHostsFile("host1",ip); rm.getNodesListManager().refreshNodes(conf); nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals(0,ClusterMetrics.getMetrics().getNumDecommisionedNMs()); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertTrue("Node is not decommisioned.",NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction())); checkDecommissionedNMCount(rm,++metricCount); nodeHeartbeat=nm3.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals(metricCount,ClusterMetrics.getMetrics().getNumDecommisionedNMs()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Registers a NodeManager whose version (1.9.9) is below the configured
 * minimum ("EqualToRM") and expects a SHUTDOWN node action with a
 * diagnostic message naming the disallowed version.
 */
@Test
public void testNodeRegistrationVersionLessThanRM() throws Exception {
  writeToHostsFile("host2");
  Configuration yarnConf = new Configuration();
  yarnConf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  yarnConf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, "EqualToRM");
  rm = new MockRM(yarnConf);
  rm.start();

  final String nmVersion = "1.9.9";
  ResourceTrackerService tracker = rm.getResourceTrackerService();

  // Build a registration request for a node running the old version.
  RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
  request.setResource(BuilderUtils.newResource(1024, 1));
  request.setNodeId(NodeId.newInstance("host2", 1234));
  request.setHttpPort(1234);
  request.setNMVersion(nmVersion);

  RegisterNodeManagerResponse response = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response.getNodeAction());
  String expectedFragment =
      "Disallowed NodeManager Version " + nmVersion + ", is less than the minimum version ";
  Assert.assertTrue(
      "Diagnostic message did not contain: 'Disallowed NodeManager Version " + nmVersion
          + ", is less than the minimum version'",
      response.getDiagnosticsMessage().contains(expectedFragment));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Node reconnect test using a DrainDispatcher plus a scheduler event
 * dispatcher that handles events synchronously: re-registering an
 * existing node (healthy nm1 or unhealthy nm2) must not change the
 * active-NM count or the unhealthy count, and after nm2 heartbeats
 * healthy the root queue's available MB doubles; finally re-registering
 * host2 with a larger resource (10240) must raise available MB to
 * 5120 + 10240.
 * NOTE(review): the last registerNode intentionally reuses "host2:5678"
 * while assigning to the nm1 variable — a reconnect with changed
 * capability, not a new node.
 */
@Test public void testReconnectNode() throws Exception { final DrainDispatcher dispatcher=new DrainDispatcher(); rm=new MockRM(){ @Override protected EventHandler createSchedulerEventDispatcher(){ return new SchedulerEventDispatcher(this.scheduler){ @Override public void handle( SchedulerEvent event){ scheduler.handle(event); } } ; } @Override protected Dispatcher createDispatcher(){ return dispatcher; } } ; rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:5678",5120); nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(false); dispatcher.await(); checkUnealthyNMCount(rm,nm2,true,1); final int expectedNMs=ClusterMetrics.getMetrics().getNumActiveNMs(); QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics(); Assert.assertEquals(5120,metrics.getAvailableMB()); nm1=rm.registerNode("host1:1234",5120); NodeHeartbeatResponse response=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); dispatcher.await(); Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs()); checkUnealthyNMCount(rm,nm2,true,1); nm2=rm.registerNode("host2:5678",5120); response=nm2.nodeHeartbeat(false); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); dispatcher.await(); Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs()); checkUnealthyNMCount(rm,nm2,true,1); nm2=rm.registerNode("host2:5678",5120); dispatcher.await(); response=nm2.nodeHeartbeat(true); response=nm2.nodeHeartbeat(true); dispatcher.await(); Assert.assertEquals(5120 + 5120,metrics.getAvailableMB()); nm1=rm.registerNode("host2:5678",10240); dispatcher.await(); response=nm1.nodeHeartbeat(true); dispatcher.await(); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); Assert.assertEquals(5120 + 10240,metrics.getAvailableMB()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.TestSubmitApplicationWithRMHA

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After an explicit failover the standby (now active) RM must already
 * hold the application recovered from the state store, and re-submitting
 * with the same ApplicationId must hand back that same application.
 */
@Test(timeout = 5000)
public void testHandleRMHADuringSubmitApplicationCallWithSavedApplicationState() throws Exception {
  startRMs();
  RMApp originalApp = rm1.submitApp(200);
  explicitFailover();

  // rm2 recovered the application from the shared state store.
  Assert.assertTrue(
      rm2.getRMContext().getRMApps().containsKey(originalApp.getApplicationId()));

  int maxAttempts = configuration.getInt(
      YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  RMApp resubmittedApp = rm2.submitApp(200, "",
      UserGroupInformation.getCurrentUser().getShortUserName(), null, false, null,
      maxAttempts, null, null, false, false, true, originalApp.getApplicationId());
  Assert.assertEquals(resubmittedApp.getApplicationId(), originalApp.getApplicationId());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getApplicationReport must be idempotent: repeated calls on the same RM,
 * and calls on the newly-active RM after failover, all return a report
 * with the same application id and YARN application state.
 */
@Test
public void testGetApplicationReportIdempotent() throws Exception {
  startRMs();
  RMApp app = rm1.submitApp(200);

  ApplicationReport firstReport = rm1.getApplicationReport(app.getApplicationId());
  boolean submittedOrAccepted =
      firstReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED
          || firstReport.getYarnApplicationState() == YarnApplicationState.SUBMITTED;
  Assert.assertTrue(submittedOrAccepted);

  ApplicationReport secondReport = rm1.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(firstReport.getApplicationId(), secondReport.getApplicationId());
  Assert.assertEquals(firstReport.getYarnApplicationState(),
      secondReport.getYarnApplicationState());

  explicitFailover();

  // The recovered RM must report the same application identically.
  ApplicationReport failoverReport = rm2.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(firstReport.getApplicationId(), failoverReport.getApplicationId());
  Assert.assertEquals(firstReport.getYarnApplicationState(),
      failoverReport.getYarnApplicationState());

  ApplicationReport repeatedFailoverReport = rm2.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(failoverReport.getApplicationId(),
      repeatedFailoverReport.getApplicationId());
  Assert.assertEquals(failoverReport.getYarnApplicationState(),
      repeatedFailoverReport.getYarnApplicationState());
}

Class: org.apache.hadoop.yarn.server.resourcemanager.TestWorkPreservingRMRestart

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Work-preserving restart: containers reported by a re-registering NM for
 * an application that already FINISHED before the restart must not be
 * recovered into the scheduler — neither the RUNNING nor the COMPLETE
 * container status yields an RMContainer.
 * NOTE(review): the 3s sleep gives the scheduler time to (not) recover
 * the containers — a timing-based wait, inherently race-prone.
 */
@Test(timeout=20000) public void testContainersNotRecoveredForCompletedApps() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1); rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(runningContainer,completedContainer),null); RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); assertEquals(RMAppState.FINISHED,recoveredApp1.getState()); Thread.sleep(3000); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler(); assertNull(scheduler.getRMContainer(runningContainer.getContainerId())); assertNull(scheduler.getRMContainer(completedContainer.getContainerId())); }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Work-preserving restart scheduler recovery: after rm2 recovers the app
 * and the NM re-registers with one AM container, one running container
 * and one completed container, the scheduler node must hold exactly the
 * two live containers and account used/available resources accordingly.
 * Queue state is then checked per configured scheduler class (Capacity vs
 * Fifo), headroom for non-Fair schedulers, and the recovered attempt's
 * next container id must be (1 << 22) + 1.
 * NOTE(review): (1 << 22) + 1 presumably reflects the container-id epoch
 * bit layout — confirm against the ContainerId encoding.
 */
@Test(timeout=20000) public void testSchedulerRecovery() throws Exception { conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS,true); conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,DominantResourceCalculator.class.getName()); int containerMemory=1024; Resource containerResource=Resource.newInstance(containerMemory,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); rm1.clearQueueMetrics(app1); rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); RMAppAttempt loadedAttempt1=recoveredApp1.getCurrentAppAttempt(); NMContainerStatus amContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),1,ContainerState.RUNNING); NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(amContainer,runningContainer,completedContainer),null); waitForNumContainersToRecover(2,rm2,am1.getApplicationAttemptId()); Set launchedContainers=((RMNodeImpl)rm2.getRMContext().getRMNodes().get(nm1.getNodeId())).getLaunchedContainers(); assertTrue(launchedContainers.contains(amContainer.getContainerId())); assertTrue(launchedContainers.contains(runningContainer.getContainerId())); rm2.waitForState(nm1,amContainer.getContainerId(),RMContainerState.RUNNING); rm2.waitForState(nm1,runningContainer.getContainerId(),RMContainerState.RUNNING); rm2.waitForContainerToComplete(loadedAttempt1,completedContainer); AbstractYarnScheduler 
scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler(); SchedulerNode schedulerNode1=scheduler.getSchedulerNode(nm1.getNodeId()); Resource usedResources=Resources.multiply(containerResource,2); Resource nmResource=Resource.newInstance(nm1.getMemory(),nm1.getvCores()); assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId())); assertTrue(schedulerNode1.isValidContainer(runningContainer.getContainerId())); assertFalse(schedulerNode1.isValidContainer(completedContainer.getContainerId())); assertEquals(2,schedulerNode1.getNumContainers()); assertEquals(Resources.subtract(nmResource,usedResources),schedulerNode1.getAvailableResource()); assertEquals(usedResources,schedulerNode1.getUsedResource()); Resource availableResources=Resources.subtract(nmResource,usedResources); Map schedulerApps=((AbstractYarnScheduler)rm2.getResourceScheduler()).getSchedulerApplications(); SchedulerApplication schedulerApp=schedulerApps.get(recoveredApp1.getApplicationId()); if (schedulerClass.equals(CapacityScheduler.class)) { checkCSQueue(rm2,schedulerApp,nmResource,nmResource,usedResources,2); } else if (schedulerClass.equals(FifoScheduler.class)) { checkFifoQueue(schedulerApp,usedResources,availableResources); } SchedulerApplicationAttempt schedulerAttempt=schedulerApp.getCurrentAppAttempt(); assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(amContainer.getContainerId()))); assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(runningContainer.getContainerId()))); assertEquals(schedulerAttempt.getCurrentConsumption(),usedResources); if (scheduler.getClass() != FairScheduler.class) { assertEquals(availableResources,schedulerAttempt.getHeadroom()); } assertEquals((1 << 22) + 1,schedulerAttempt.getNewContainerId()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.ahs.TestRMApplicationHistoryWriter

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * History-writer container round trip: containerStarted then
 * containerFinished, each polled from the store up to MAX_RETRIES with
 * 100ms sleeps, asserting start data (assigned node, allocated resource,
 * priority) and finish data (diagnostics, exit status, COMPLETE state).
 * NOTE(review): the creation-time assertion reads
 * container.getCreationTime(), not containerHD — possibly intentional
 * (it checks the mock, not the stored data); verify.
 */
@Test public void testWriteContainer() throws Exception { RMContainer container=createRMContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); writer.containerStarted(container); ContainerHistoryData containerHD=null; for (int i=0; i < MAX_RETRIES; ++i) { containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); if (containerHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(containerHD); Assert.assertEquals(NodeId.newInstance("test host",-100),containerHD.getAssignedNode()); Assert.assertEquals(Resource.newInstance(-1,-1),containerHD.getAllocatedResource()); Assert.assertEquals(Priority.UNDEFINED,containerHD.getPriority()); Assert.assertEquals(0L,container.getCreationTime()); writer.containerFinished(container); for (int i=0; i < MAX_RETRIES; ++i) { containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); if (containerHD.getContainerState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals("test diagnostics info",containerHD.getDiagnosticsInfo()); Assert.assertEquals(-1,containerHD.getContainerExitStatus()); Assert.assertEquals(ContainerState.COMPLETE,containerHD.getContainerState()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * History-writer attempt round trip: applicationAttemptStarted then
 * applicationAttemptFinished(FINISHED), each polled from the store up to
 * MAX_RETRIES with 100ms sleeps, asserting host/RPC-port/master-container
 * on start and diagnostics/tracking-URL/final-status/attempt-state on
 * finish.
 * NOTE(review): the second poll dereferences appAttemptHD without a null
 * check; it relies on the first loop having succeeded.
 */
@Test public void testWriteApplicationAttempt() throws Exception { RMAppAttempt appAttempt=createRMAppAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); writer.applicationAttemptStarted(appAttempt); ApplicationAttemptHistoryData appAttemptHD=null; for (int i=0; i < MAX_RETRIES; ++i) { appAttemptHD=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); if (appAttemptHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(appAttemptHD); Assert.assertEquals("test host",appAttemptHD.getHost()); Assert.assertEquals(-100,appAttemptHD.getRPCPort()); Assert.assertEquals(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1),appAttemptHD.getMasterContainerId()); writer.applicationAttemptFinished(appAttempt,RMAppAttemptState.FINISHED); for (int i=0; i < MAX_RETRIES; ++i) { appAttemptHD=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); if (appAttemptHD.getYarnApplicationAttemptState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals("test diagnostics info",appAttemptHD.getDiagnosticsInfo()); Assert.assertEquals("test url",appAttemptHD.getTrackingURL()); Assert.assertEquals(FinalApplicationStatus.UNDEFINED,appAttemptHD.getFinalApplicationStatus()); Assert.assertEquals(YarnApplicationAttemptState.FINISHED,appAttemptHD.getYarnApplicationAttemptState()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * History-writer application round trip: applicationStarted then
 * applicationFinished(FINISHED), each polled from the store up to
 * MAX_RETRIES with 100ms sleeps, asserting name/type/user/queue and
 * submit/start times on start, and finish time/diagnostics/final
 * status/application state on finish.
 * NOTE(review): the second poll dereferences appHD without a null check;
 * it relies on the first loop having succeeded.
 */
@Test public void testWriteApplication() throws Exception { RMApp app=createRMApp(ApplicationId.newInstance(0,1)); writer.applicationStarted(app); ApplicationHistoryData appHD=null; for (int i=0; i < MAX_RETRIES; ++i) { appHD=store.getApplication(ApplicationId.newInstance(0,1)); if (appHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(appHD); Assert.assertEquals("test app",appHD.getApplicationName()); Assert.assertEquals("test app type",appHD.getApplicationType()); Assert.assertEquals("test user",appHD.getUser()); Assert.assertEquals("test queue",appHD.getQueue()); Assert.assertEquals(0L,appHD.getSubmitTime()); Assert.assertEquals(1L,appHD.getStartTime()); writer.applicationFinished(app,RMAppState.FINISHED); for (int i=0; i < MAX_RETRIES; ++i) { appHD=store.getApplication(ApplicationId.newInstance(0,1)); if (appHD.getYarnApplicationState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals(2L,appHD.getFinishTime()); Assert.assertEquals("test diagnostics info",appHD.getDiagnosticsInfo()); Assert.assertEquals(FinalApplicationStatus.UNDEFINED,appHD.getFinalApplicationStatus()); Assert.assertEquals(YarnApplicationState.FINISHED,appHD.getYarnApplicationState()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCResponseId

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies AM/RM allocate response-id bookkeeping: the response id
 * increments per new request, a retried request (same id) returns the
 * cached response with the same id, and a stale id (0 after progress)
 * triggers an AM_RESYNC command.
 */
@Test
public void testARRMResponseId() throws Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();

  AllocateRequest allocateRequest = AllocateRequest.newInstance(0, 0F, null, null, null);
  AllocateResponse response = allocate(attempt.getAppAttemptId(), allocateRequest);
  Assert.assertEquals(1, response.getResponseId());
  // assertNull (was assertTrue(x == null)) for a clearer failure message.
  Assert.assertNull(response.getAMCommand());

  allocateRequest = AllocateRequest.newInstance(response.getResponseId(), 0F, null, null, null);
  response = allocate(attempt.getAppAttemptId(), allocateRequest);
  Assert.assertEquals(2, response.getResponseId());

  // Retry of the same request must return the cached response, same id.
  response = allocate(attempt.getAppAttemptId(), allocateRequest);
  Assert.assertEquals(2, response.getResponseId());

  // A request with an out-of-date response id must trigger a resync.
  allocateRequest = AllocateRequest.newInstance(0, 0F, null, null, null);
  response = allocate(attempt.getAppAttemptId(), allocateRequest);
  Assert.assertEquals(AMCommand.AM_RESYNC, response.getAMCommand());
}

Class: org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRestart

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Work-preserving AM restart: the app is submitted with keep-containers
 * semantics, and containers in every scheduler state are set up — RUNNING (ids
 * 2, 3), ACQUIRED (4), ALLOCATED (5) and RESERVED (6) — before the AM container
 * (id 1) is completed to fail attempt 1. The test then verifies that RUNNING
 * containers survive and are handed to the new AM via
 * getContainersFromPreviousAttempts, while the ACQUIRED/ALLOCATED/RESERVED
 * containers are killed and later surface in the new attempt's
 * getJustFinishedContainers() list.
 * NOTE(review): the bare Thread.sleep(3000) is a fixed wait for container
 * cleanup and a likely flakiness source — an explicit wait-for-state would be
 * more robust.
 * NOTE(review): getLiveContainers() appears to hold RMContainer objects, so
 * contains(containerId2) may be trivially false regardless of cleanup — verify
 * the collection's element type against SchedulerApplicationAttempt.
 */
@Test(timeout=30000) public void testAMRestartWithExistingContainers() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); MockRM rm1=new MockRM(conf); rm1.start(); RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true); MockNM nm1=new MockNM("127.0.0.1:1234",10240,rm1.getResourceTrackerService()); nm1.registerNode(); MockNM nm2=new MockNM("127.0.0.1:2351",4089,rm1.getResourceTrackerService()); nm2.registerNode(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); int NUM_CONTAINERS=3; am1.allocate("127.0.0.1",1024,NUM_CONTAINERS,new ArrayList()); nm1.nodeHeartbeat(true); List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (containers.size() != NUM_CONTAINERS) { nm1.nodeHeartbeat(true); containers.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(200); } nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING); ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3); rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING); ContainerId containerId4=ContainerId.newInstance(am1.getApplicationAttemptId(),4); rm1.waitForState(nm1,containerId4,RMContainerState.ACQUIRED); am1.allocate("127.0.0.1",1024,1,new ArrayList()); nm1.nodeHeartbeat(true); ContainerId containerId5=ContainerId.newInstance(am1.getApplicationAttemptId(),5); rm1.waitForContainerAllocated(nm1,containerId5); rm1.waitForState(nm1,containerId5,RMContainerState.ALLOCATED); am1.allocate("127.0.0.1",6000,1,new ArrayList()); ContainerId containerId6=ContainerId.newInstance(am1.getApplicationAttemptId(),6); nm1.nodeHeartbeat(true); 
SchedulerApplicationAttempt schedulerAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6); while (schedulerAttempt.getReservedContainers().isEmpty()) { System.out.println("Waiting for container " + containerId6 + " to be reserved."); nm1.nodeHeartbeat(true); Thread.sleep(200); } Assert.assertEquals(containerId6,schedulerAttempt.getReservedContainers().get(0).getContainerId()); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE); am1.waitForState(RMAppAttemptState.FAILED); Thread.sleep(3000); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4)); Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5)); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); ApplicationAttemptId newAttemptId=app1.getCurrentAppAttempt().getAppAttemptId(); Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId())); RMAppAttempt attempt2=app1.getCurrentAppAttempt(); nm1.nodeHeartbeat(true); MockAM am2=rm1.sendAMLaunched(attempt2.getAppAttemptId()); RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt(); Assert.assertEquals(2,registerResponse.getContainersFromPreviousAttempts().size()); boolean containerId2Exists=false, containerId3Exists=false; for ( Container container : registerResponse.getContainersFromPreviousAttempts()) { if (container.getId().equals(containerId2)) { containerId2Exists=true; } if (container.getId().equals(containerId3)) { containerId3Exists=true; } } Assert.assertTrue(containerId2Exists && containerId3Exists); rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); RMAppAttempt newAttempt=app1.getRMAppAttempt(am2.getApplicationAttemptId()); waitForContainersToFinish(4,newAttempt); boolean container3Exists=false, container4Exists=false, container5Exists=false, 
container6Exists=false; for ( ContainerStatus status : newAttempt.getJustFinishedContainers()) { if (status.getContainerId().equals(containerId3)) { container3Exists=true; } if (status.getContainerId().equals(containerId4)) { container4Exists=true; } if (status.getContainerId().equals(containerId5)) { container5Exists=true; } if (status.getContainerId().equals(containerId6)) { container6Exists=true; } } Assert.assertTrue(container3Exists && container4Exists && container5Exists&& container6Exists); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); SchedulerApplicationAttempt schedulerNewAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am2); Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2)); System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers()); waitForContainersToFinish(5,newAttempt); rm1.stop(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * NM tokens must be re-bound across AM restarts: tokens collected by attempt 1
 * (from nm1 allocations) are returned to attempt 2 via
 * getNMTokensFromPreviousAttempts; after attempt 2 also fails — having
 * additionally allocated on nm2 — attempt 3 receives the accumulated tokens for
 * both nodes (asserted as exactly 2 tokens containing all expected entries).
 * The allocate-until-satisfied while loops heartbeat and sleep 200 ms per
 * iteration; overall progress is bounded by the 30 s test timeout.
 */
@Test(timeout=30000) public void testNMTokensRebindOnAMRestart() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,3); MockRM rm1=new MockRM(conf); rm1.start(); RMApp app1=rm1.submitApp(200,"myname","myuser",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true); MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService()); nm1.registerNode(); MockNM nm2=new MockNM("127.1.1.1:4321",8000,rm1.getResourceTrackerService()); nm2.registerNode(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); List containers=new ArrayList(); List expectedNMTokens=new ArrayList(); while (true) { AllocateResponse response=am1.allocate("127.0.0.1",2000,2,new ArrayList()); nm1.nodeHeartbeat(true); containers.addAll(response.getAllocatedContainers()); expectedNMTokens.addAll(response.getNMTokens()); if (containers.size() == 2) { break; } Thread.sleep(200); System.out.println("Waiting for container to be allocated."); } nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING); ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3); rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE); am1.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am2=MockRM.launchAM(app1,rm1,nm1); RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt(); rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING); Assert.assertEquals(expectedNMTokens,registerResponse.getNMTokensFromPreviousAttempts()); containers=new ArrayList(); while (true) { AllocateResponse 
allocateResponse=am2.allocate("127.1.1.1",4000,1,new ArrayList()); nm2.nodeHeartbeat(true); containers.addAll(allocateResponse.getAllocatedContainers()); expectedNMTokens.addAll(allocateResponse.getNMTokens()); if (containers.size() == 1) { break; } Thread.sleep(200); System.out.println("Waiting for container to be allocated."); } nm1.nodeHeartbeat(am2.getApplicationAttemptId(),2,ContainerState.RUNNING); ContainerId am2ContainerId2=ContainerId.newInstance(am2.getApplicationAttemptId(),2); rm1.waitForState(nm1,am2ContainerId2,RMContainerState.RUNNING); nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE); am2.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am3=MockRM.launchAM(app1,rm1,nm1); registerResponse=am3.registerAppAttempt(); rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING); List transferredTokens=registerResponse.getNMTokensFromPreviousAttempts(); Assert.assertEquals(2,transferredTokens.size()); Assert.assertTrue(transferredTokens.containsAll(expectedNMTokens)); rm1.stop(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A preemption-killed AM must not count against the max-attempt limit across an
 * RM restart: after the CapacityScheduler kills the AM container, attempt 1
 * reports shouldCountTowardsMaxAttemptRetry()==false and the state store records
 * ContainerExitStatus.PREEMPTED for it. A second RM recovered from the same
 * MemoryRMStateStore still launches attempt 2 — even though RM_AM_MAX_ATTEMPTS
 * is 1 — and that attempt finishes normally and does count towards the limit
 * (its stored AM exit status stays INVALID, i.e. not a preemption).
 */
@Test(timeout=20000) public void testPreemptedAMRestartOnRMRestart() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler(); ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1); scheduler.killContainer(scheduler.getRMContainer(amContainer)); am1.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId()); Assert.assertEquals(1,appState.getAttemptCount()); Assert.assertEquals(ContainerExitStatus.PREEMPTED,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus()); MockRM rm2=new MockRM(conf,memStore); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); nm1.registerNode(); rm2.start(); MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1); MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2); RMAppAttempt attempt2=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt(); Assert.assertTrue(attempt2.shouldCountTowardsMaxAttemptRetry()); 
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus()); rm1.stop(); rm2.stop(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Walks through every AM failure cause that must NOT count against
 * RM_AM_MAX_ATTEMPTS (set to 1 here): scheduler preemption (attempts 1 and 2),
 * an NM-reported disk failure — a fabricated ContainerStatus with exit status
 * DISKS_FAILED delivered via heartbeat (attempt 3) — and the node going
 * unhealthy so the AM container is ABORTED (attempt 4). Each of these leaves
 * shouldCountTowardsMaxAttemptRetry() false and a new attempt is launched
 * despite the limit of 1. Attempt 5 fails with a plain container COMPLETE on a
 * healthy node, which does count, driving the app to FAILED with 5 recorded
 * attempts.
 */
@Test(timeout=100000) public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler(); ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1); scheduler.killContainer(scheduler.getRMContainer(amContainer)); am1.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId()); MockAM am2=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1); RMAppAttempt attempt2=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt2).mayBeLastAttempt()); ContainerId amContainer2=ContainerId.newInstance(am2.getApplicationAttemptId(),1); scheduler.killContainer(scheduler.getRMContainer(amContainer2)); am2.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt2.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am3=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),3,nm1); RMAppAttempt attempt3=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt3).mayBeLastAttempt()); 
ContainerStatus containerStatus=Records.newRecord(ContainerStatus.class); containerStatus.setContainerId(attempt3.getMasterContainer().getId()); containerStatus.setDiagnostics("mimic NM disk_failure"); containerStatus.setState(ContainerState.COMPLETE); containerStatus.setExitStatus(ContainerExitStatus.DISKS_FAILED); Map> conts=new HashMap>(); conts.put(app1.getApplicationId(),Collections.singletonList(containerStatus)); nm1.nodeHeartbeat(conts,true); am3.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt3.shouldCountTowardsMaxAttemptRetry()); Assert.assertEquals(ContainerExitStatus.DISKS_FAILED,appState.getAttempt(am3.getApplicationAttemptId()).getAMContainerExitStatus()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am4=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),4,nm1); RMAppAttempt attempt4=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt4).mayBeLastAttempt()); MockNM nm2=new MockNM("127.0.0.1:2234",8000,rm1.getResourceTrackerService()); nm2.registerNode(); nm1.nodeHeartbeat(false); am4.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt4.shouldCountTowardsMaxAttemptRetry()); Assert.assertEquals(ContainerExitStatus.ABORTED,appState.getAttempt(am4.getApplicationAttemptId()).getAMContainerExitStatus()); nm2.nodeHeartbeat(true); MockAM am5=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),5,nm2); RMAppAttempt attempt5=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt5).mayBeLastAttempt()); nm2.nodeHeartbeat(am5.getApplicationAttemptId(),1,ContainerState.COMPLETE); am5.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(attempt5.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED); Assert.assertEquals(5,app1.getAppAttempts().size()); rm1.stop(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An AM container killed during RM restart/failover must not count towards
 * RM_AM_MAX_ATTEMPTS: a second RM recovers from the MemoryRMStateStore and the
 * NM re-registers reporting the AM container COMPLETE with exit status
 * KILLED_BY_RESOURCEMANAGER. Attempt 1 goes FAILED with that exit status saved
 * in the store, yet a new attempt is launched despite the limit of 1; that
 * attempt finishes normally, counts towards the limit, and its stored AM exit
 * status remains INVALID.
 */
@Test(timeout=50000) public void testRMRestartOrFailoverNotCountedForAMFailures() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt1).mayBeLastAttempt()); MockRM rm2=new MockRM(conf,memStore); rm2.start(); ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId()); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); NMContainerStatus status=Records.newRecord(NMContainerStatus.class); status.setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER); status.setContainerId(attempt1.getMasterContainer().getId()); status.setContainerState(ContainerState.COMPLETE); status.setDiagnostics(""); nm1.registerNode(Collections.singletonList(status),null); rm2.waitForState(attempt1.getAppAttemptId(),RMAppAttemptState.FAILED); Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus()); rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1); MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2); RMAppAttempt attempt3=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt(); Assert.assertTrue(attempt3.shouldCountTowardsMaxAttemptRetry()); 
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus()); rm1.stop(); rm2.stop(); }

Class: org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy

BranchVerifier UtilityVerifier NullVerifier HybridVerifier 
/**
 * Verifies that when monitors are enabled and ProportionalCapacityPreemptionPolicy
 * is configured, initializing the RM leaves the policy with a non-null resource
 * calculator (i.e. it was wired to the scheduler during init).
 */
@Test
public void testPolicyInitializeAfterSchedulerInitialized() {
  Configuration config = new Configuration();
  config.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
  config.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  @SuppressWarnings("resource")
  MockRM resourceManager = new MockRM(config);
  resourceManager.init(config);
  // Scan the active services for the SchedulingMonitor and inspect its policy.
  for (Service svc : resourceManager.getRMActiveService().getServices()) {
    if (!(svc instanceof SchedulingMonitor)) {
      continue;
    }
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy)
            ((SchedulingMonitor) svc).getSchedulingEditPolicy();
    assertNotNull(editPolicy.getResourceCalculator());
    return;
  }
  fail("Failed to find SchedulingMonitor service, please check what happened");
}

Class: org.apache.hadoop.yarn.server.resourcemanager.recovery.TestZKRMStateStoreZKClientConnections

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Forces a ZooKeeper session expiry (zkClientTester.forExpire=true) after a
 * create/get/set sequence against "/test"; once the sync barrier is crossed, the
 * store must have transparently established a new session, so the previously
 * written value "bytes" is still readable. Any exception on that final read is
 * treated as a failed session re-creation and fails the test.
 */
@Test(timeout=20000) public void testZKSessionTimeout() throws Exception { TestZKClient zkClientTester=new TestZKClient(); String path="/test"; YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS); ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); TestDispatcher dispatcher=new TestDispatcher(); store.setRMDispatcher(dispatcher); zkClientTester.forExpire=true; store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); store.getDataWithRetries(path,true); store.setDataWithRetries(path,"bytes".getBytes(),0); zkClientTester.syncBarrier.await(); try { byte[] ret=store.getDataWithRetries(path,false); assertEquals("bytes",new String(ret)); } catch ( Exception e) { String error="New session creation failed"; LOG.error(error,e); fail(error); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stops the ZK server mid-session and verifies a subsequent read fails with a
 * "Wait for ZKClient creation timed out" message; after restarting the server
 * and waiting (up to ZK_OP_WAIT_TIME) for reconnection, the value "newBytes"
 * written before the outage must be readable again, proving the client session
 * was restored rather than lost.
 */
@Test(timeout=20000) public void testZKClientDisconnectAndReconnect() throws Exception { TestZKClient zkClientTester=new TestZKClient(); String path="/test"; YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS); ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); TestDispatcher dispatcher=new TestDispatcher(); store.setRMDispatcher(dispatcher); store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); store.getDataWithRetries(path,true); store.setDataWithRetries(path,"newBytes".getBytes(),0); stopServer(); zkClientTester.watcher.waitForDisconnected(ZK_OP_WAIT_TIME); try { store.getDataWithRetries(path,true); fail("Expected ZKClient time out exception"); } catch ( Exception e) { assertTrue(e.getMessage().contains("Wait for ZKClient creation timed out")); } startServer(); zkClientTester.watcher.waitForConnected(ZK_OP_WAIT_TIME); byte[] ret=null; try { ret=store.getDataWithRetries(path,true); } catch ( Exception e) { String error="ZKRMStateStore Session restore failed"; LOG.error(error,e); fail(error); } assertEquals("newBytes",new String(ret)); }

Class: org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.TestRMNMRPCResponseId

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies NM&lt;-&gt;RM heartbeat response-id bookkeeping: the first heartbeat after
 * registration gets response id 1, a heartbeat echoing that id gets 2, a
 * duplicate of the same request is answered again with 2, and a heartbeat with a
 * stale id (0) triggers NodeAction.RESYNC with a "too far behind" diagnostic.
 */
@Test
public void testRPCResponseId() throws IOException, YarnException {
  String node = "localhost";
  Resource capability = BuilderUtils.newResource(1024, 1);
  nodeId = NodeId.newInstance(node, 1234);
  // Register the node. (The original built a second, identically configured
  // request object that was never used; it has been removed as dead code.)
  RegisterNodeManagerRequest request1 =
      recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
  request1.setNodeId(nodeId);
  request1.setHttpPort(0);
  request1.setResource(capability);
  resourceTrackerService.registerNodeManager(request1);
  org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus =
      recordFactory.newRecordInstance(
          org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
  nodeStatus.setNodeId(nodeId);
  NodeHealthStatus nodeHealthStatus =
      recordFactory.newRecordInstance(NodeHealthStatus.class);
  nodeHealthStatus.setIsNodeHealthy(true);
  nodeStatus.setNodeHealthStatus(nodeHealthStatus);
  NodeHeartbeatRequest nodeHeartBeatRequest =
      recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
  nodeHeartBeatRequest.setNodeStatus(nodeStatus);
  nodeStatus.setResponseId(0);
  NodeHeartbeatResponse response =
      resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  // assertEquals gives a far better failure message than assertTrue(x == n).
  Assert.assertEquals(1, response.getResponseId());
  nodeStatus.setResponseId(response.getResponseId());
  response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(2, response.getResponseId());
  // Re-sending the same request must be answered idempotently.
  response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(2, response.getResponseId());
  // A stale response id forces the NM to resync.
  nodeStatus.setResponseId(0);
  response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(NodeAction.RESYNC, response.getNodeAction());
  Assert.assertEquals("Too far behind rm response id:2 nm response id:0",
      response.getDiagnosticsMessage());
}

Class: org.apache.hadoop.yarn.server.resourcemanager.rmapp.TestRMAppTransitions

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Drives an app through maxAppAttempts-1 attempt failures — each iteration
 * expects a fresh attempt (ACCEPTED, then APP_ACCEPTED, then ATTEMPT_REGISTERED
 * back to RUNNING) with an incremented attempt id — then fails the final
 * attempt and verifies the app lands in FAILED with diagnostics matching
 * ".*Failing the application.*", its final state is saved, and a subsequent
 * KILL event leaves the FAILED terminal state unchanged.
 */
@Test public void testAppRunningFailed() throws IOException { LOG.info("--- START: testAppRunningFailed ---"); RMApp application=testCreateAppRunning(null); RMAppAttempt appAttempt=application.getCurrentAppAttempt(); int expectedAttemptId=1; Assert.assertEquals(expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId()); Assert.assertTrue(maxAppAttempts > 1); for (int i=1; i < maxAppAttempts; i++) { RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED,application); appAttempt=application.getCurrentAppAttempt(); Assert.assertEquals(++expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId()); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_ACCEPTED); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED,application); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_REGISTERED); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.RUNNING,application); } RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); sendAppUpdateSavedEvent(application); assertFailed(application,".*Failing the application.*"); assertAppFinalStateSaved(application); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); assertFailed(application,".*Failing the application.*"); assertAppFinalStateSaved(application); verifyApplicationFinished(RMAppState.FAILED); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * For an app still in the NEW state, createAndGetApplicationReport must return a
 * non-null resource-usage report — specifically the shared
 * RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT placeholder — both when
 * no caller user is given and when a client user ("clientuser") is supplied.
 */
@Test
public void testGetAppReport() {
  RMApp app = createNewTestApp(null);
  assertAppState(RMAppState.NEW, app);
  ApplicationReport report = app.createAndGetApplicationReport(null, true);
  Assert.assertNotNull(report.getApplicationResourceUsageReport());
  // JUnit convention: expected value first, actual second (the original had
  // them reversed, which garbles the failure message).
  Assert.assertEquals(RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT,
      report.getApplicationResourceUsageReport());
  report = app.createAndGetApplicationReport("clientuser", true);
  Assert.assertNotNull(report.getApplicationResourceUsageReport());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Unmanaged-AM lifecycle, both paths in one test. Success path: a finished
 * unmanaged app's diagnostics must contain the supplied message. Failure path
 * (after reset(writer)): failing attempt 1 drives the app straight to FAILED
 * with diagnostics matching ".*Unmanaged application.*Failing the
 * application.*" — i.e. unmanaged AMs get no retry, as confirmed by the attempt
 * id still being 1.
 */
@Test public void testUnmanagedApp() throws IOException { ApplicationSubmissionContext subContext=new ApplicationSubmissionContextPBImpl(); subContext.setUnmanagedAM(true); LOG.info("--- START: testUnmanagedAppSuccessPath ---"); final String diagMsg="some diagnostics"; RMApp application=testCreateAppFinished(subContext,diagMsg); Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().indexOf(diagMsg) != -1); reset(writer); LOG.info("--- START: testUnmanagedAppFailPath ---"); application=testCreateAppRunning(subContext); RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); RMAppAttempt appAttempt=application.getCurrentAppAttempt(); Assert.assertEquals(1,appAttempt.getAppAttemptId().getAttemptId()); sendAppUpdateSavedEvent(application); assertFailed(application,".*Unmanaged application.*Failing the application.*"); assertAppFinalStateSaved(application); }

Class: org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.TestRMAppAttemptTransitions

InternalCallVerifier AssumptionSetter NullVerifier EqualityVerifier HybridVerifier 
/**
 * Client-token issuance (runs only when security is enabled, via assumeTrue):
 * before the attempt is launched, createClientToken returns null for any caller;
 * after launch it returns a token only when a client user name is supplied
 * (a null user still yields null); and once the attempt is killed and the state
 * save is processed, token creation returns null again for all callers.
 */
@Test public void testGetClientToken() throws Exception { assumeTrue(isSecurityEnabled); Container amContainer=allocateApplicationAttempt(); Token token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNull(token); launchApplicationAttempt(amContainer); token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNotNull(token); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL)); assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNull(token); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the failing attempt is the LAST one (getMaxAppAttempts() stubbed to 1),
 * container state must NOT be transferred even though
 * keepContainersAcrossApplicationAttempts is stubbed true: after the AM
 * container completes and the saved event is processed, the attempt ends FAILED
 * and transferStateFromPreviousAttempt stays false. Note the attempt is rebuilt
 * here with isLastAttempt=true in the RMAppAttemptImpl constructor.
 */
@Test public void testContainersCleanupForLastAttempt(){ applicationAttempt=new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(),rmContext,scheduler,masterService,submissionContext,new Configuration(),true); when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true); when(submissionContext.getMaxAppAttempts()).thenReturn(1); Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123); ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId(); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1)); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertFalse(transferStateFromPreviousAttempt); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With keep-containers enabled (and attempts remaining), a FAILED attempt must
 * transfer state (transferStateFromPreviousAttempt is true) and keep
 * accumulating just-finished containers: a second container (id 2) completing
 * AFTER the attempt already failed still shows up in
 * getJustFinishedContainers() of that failed attempt.
 */
@Test public void testFailedToFailed(){ when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true); Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123); ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId(); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1)); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertTrue(transferStateFromPreviousAttempt); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); assertEquals(0,applicationAttempt.getJustFinishedContainers().size()); ContainerStatus cs2=ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId,2),ContainerState.COMPLETE,"",0); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs2)); assertEquals(1,applicationAttempt.getJustFinishedContainers().size()); assertEquals(cs2.getContainerId(),applicationAttempt.getJustFinishedContainers().get(0).getContainerId()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An attempt that expires while LAUNCHED must end up FAILED: the diagnostics
 * mention the timeout, both tracking URLs fall back to the RM app page, the
 * attempt's token is cleaned up, and the finished-attempt bookkeeping runs.
 */
@Test(timeout=10000)
public void testLaunchedExpire() {
  Container container = allocateApplicationAttempt();
  launchApplicationAttempt(container);
  // Fire the liveliness-monitor expiry before the AM ever registers.
  applicationAttempt.handle(new RMAppAttemptEvent(
      applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.EXPIRE));
  assertEquals(YarnApplicationAttemptState.LAUNCHED,
      applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState());
  assertTrue("expire diagnostics missing",
      applicationAttempt.getDiagnostics().contains("timed out"));
  // Both tracking URLs should now point at the RM's own app page.
  String expectedUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
      applicationAttempt.getAppAttemptId().getApplicationId());
  assertEquals(expectedUrl, applicationAttempt.getOriginalTrackingUrl());
  assertEquals(expectedUrl, applicationAttempt.getTrackingUrl());
  verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An attempt that expires while RUNNING must transition to FAILED: the
 * diagnostics mention the timeout, both tracking URLs are redirected to the RM
 * app page, the attempt's token is cleaned up, and the AM host/port are
 * invalidated.
 */
@Test(timeout=20000)
public void testRunningExpire() {
  Container container = allocateApplicationAttempt();
  launchApplicationAttempt(container);
  runApplicationAttempt(container, "host", 8042, "oldtrackingurl", false);
  // Fire the liveliness-monitor expiry against the running attempt.
  applicationAttempt.handle(new RMAppAttemptEvent(
      applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.EXPIRE));
  assertEquals(YarnApplicationAttemptState.RUNNING,
      applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState());
  assertTrue("expire diagnostics missing",
      applicationAttempt.getDiagnostics().contains("timed out"));
  // Both tracking URLs should now point at the RM's own app page.
  String expectedUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
      applicationAttempt.getAppAttemptId().getApplicationId());
  assertEquals(expectedUrl, applicationAttempt.getOriginalTrackingUrl());
  assertEquals(expectedUrl, applicationAttempt.getTrackingUrl());
  verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
  verifyAMHostAndPortInvalidated();
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * For an unmanaged AM with keep-containers enabled, registering straight from
 * SUBMITTED and then processing the saved event must NOT transfer container
 * state from a previous attempt.
 */
@Test
public void testUnmanagedAMContainersCleanup() {
  unmanagedAM = true;
  when(submissionContext.getUnmanagedAM()).thenReturn(true);
  when(submissionContext.getKeepContainersAcrossApplicationAttempts())
      .thenReturn(true);
  submitApplicationAttempt();
  // Unmanaged AMs register directly — there is no LAUNCH step.
  applicationAttempt.handle(new RMAppAttemptRegistrationEvent(
      applicationAttempt.getAppAttemptId(), "host", 8042, "oldtrackingurl"));
  assertEquals(YarnApplicationAttemptState.SUBMITTED,
      applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertFalse(transferStateFromPreviousAttempt);
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestQueueMetrics

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * checkApps with all=true must aggregate across metric sources, while
 * all=false must trip the "exactly one metric" uniqueness guard; the
 * failed strict check must not corrupt the aggregated view.
 */
@Test
public void testCollectAllMetrics() {
  String name = "single";
  QueueMetrics.forQueue(ms, name, null, false, conf);
  MetricsSource source = queueSource(ms, name);
  checkApps(source, 0, 0, 0, 0, 0, 0, true);
  try {
    checkApps(source, 0, 0, 0, 0, 0, 0, false);
    Assert.fail();
  } catch (AssertionError e) {
    Assert.assertTrue(
        e.getMessage().contains("Expected exactly one metric for name "));
  }
  // Aggregated check still passes after the expected failure above.
  checkApps(source, 0, 0, 0, 0, 0, 0, true);
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestApplicationLimits

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises active/pending application limits when apps finish out of
 * order. With getMaximumActiveApplications() mocked to 2: the first two
 * submissions (app_0, app_1) activate; the next two (app_2, app_3)
 * queue as pending. Finishing a still-pending app (app_2) removes it
 * without activating anything; finishing an active app (app_0) promotes
 * the head of the pending list (app_3). Counters are asserted both
 * queue-wide and per-user after every transition.
 */
@Test public void testActiveLimitsWithKilledApps() throws Exception { final String user_0="user_0"; int APPLICATION_ID=0; doReturn(2).when(queue).getMaximumActiveApplications(); FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_0,user_0); assertEquals(1,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(1,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_0)); FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_1,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_1)); FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_2,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); assertTrue(queue.pendingApplications.contains(app_2)); FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_3,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(2,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(2,queue.getNumPendingApplications(user_0)); assertTrue(queue.pendingApplications.contains(app_3)); queue.finishApplicationAttempt(app_2,A); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); 
// Finishing active apps drains the pending queue; the final state is empty.
assertFalse(queue.pendingApplications.contains(app_2)); assertFalse(queue.activeApplications.contains(app_2)); queue.finishApplicationAttempt(app_0,A); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_3)); assertFalse(queue.pendingApplications.contains(app_3)); assertFalse(queue.activeApplications.contains(app_0)); queue.finishApplicationAttempt(app_1,A); assertEquals(1,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(1,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertFalse(queue.activeApplications.contains(app_1)); queue.finishApplicationAttempt(app_3,A); assertEquals(0,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(0,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertFalse(queue.activeApplications.contains(app_3)); }

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Moves an app between sibling leaves (a1 -> a2): membership must move
 * at the leaf level while the shared parent "a" and root are unchanged.
 */
@Test
public void testMoveAppSameParent() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  String queue =
      scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals reports expected vs. actual on failure, unlike the
  // original assertTrue(queue.equals("a1")) which only says "false".
  Assert.assertEquals("a1", queue);
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInA2 = scheduler.getAppsInQueue("a2");
  assertTrue(appsInA2.isEmpty());
  scheduler.moveApplication(app.getApplicationId(), "a2");
  // After the move: a2 holds the attempt, a1 is empty, a/root unchanged.
  appsInA2 = scheduler.getAppsInQueue("a2");
  assertEquals(1, appsInA2.size());
  queue =
      scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName();
  Assert.assertEquals("a2", queue);
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  rm.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Moves every app from a1 to b1 and verifies queue membership is
 * updated on the leaf, parent, and root queues.
 */
@Test
public void testMoveAllApps() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  // Baseline: the attempt lives in a1 / a / root and nowhere under b.
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  String queue =
      scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals reports expected vs. actual on failure, unlike
  // assertTrue(queue.equals("a1")).
  Assert.assertEquals("a1", queue);
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  scheduler.moveAllApps("a1", "b1");
  // The move is processed asynchronously. Poll (up to ~10s) instead of
  // the original fixed Thread.sleep(1000), which was both slow and
  // flaky on a loaded machine.
  appsInB1 = scheduler.getAppsInQueue("b1");
  for (int i = 0; i < 100 && appsInB1.size() != 1; i++) {
    Thread.sleep(100);
    appsInB1 = scheduler.getAppsInQueue("b1");
  }
  assertEquals(1, appsInB1.size());
  queue =
      scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
  Assert.assertEquals("b1", queue);
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.contains(appAttemptId));
  assertEquals(1, appsInB.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * killAllAppsInQueue("a1") must remove the app from the leaf, its
 * parent and root once the app reaches the KILLED state.
 */
@Test
public void testKillAllAppsInQueue() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  String queue =
      scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals reports expected vs. actual on failure, unlike
  // assertTrue(queue.equals("a1")).
  Assert.assertEquals("a1", queue);
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  scheduler.killAllAppsInQueue("a1");
  rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
  // Killed apps vanish from every level of the hierarchy.
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.isEmpty());
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Moves a single app across parents (a1 -> b1) and verifies membership
 * is updated on both leaves, both parents, and root.
 */
@Test
public void testMoveAppBasic() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  String queue =
      scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals reports expected vs. actual on failure, unlike
  // assertTrue(queue.equals("a1")).
  Assert.assertEquals("a1", queue);
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  scheduler.moveApplication(app.getApplicationId(), "b1");
  // After the move: b1/b hold the attempt, a1/a are empty, root unchanged.
  appsInB1 = scheduler.getAppsInQueue("b1");
  assertEquals(1, appsInB1.size());
  queue =
      scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
  Assert.assertEquals("b1", queue);
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.contains(appAttemptId));
  assertEquals(1, appsInB.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * reinitialize() must reject configurations where the minimum
 * allocation exceeds the maximum, for both memory and vcores.
 */
@Test(timeout = 30000)
public void testConfValidation() throws Exception {
  ResourceScheduler scheduler = new CapacityScheduler();
  scheduler.setRMContext(resourceManager.getRMContext());
  // Memory: min (2048) > max (1024) must be rejected.
  Configuration memConf = new YarnConfiguration();
  memConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  memConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  try {
    scheduler.reinitialize(memConf, mockContext);
    fail("Exception is expected because the min memory allocation is"
        + " larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler memory"));
  }
  // Vcores: min (2) > max (1) must be rejected.
  Configuration vcoreConf = new YarnConfiguration();
  vcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 2);
  vcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 1);
  try {
    scheduler.reinitialize(vcoreConf, mockContext);
    fail("Exception is expected because the min vcores allocation is"
        + " larger than the max vcores allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler vcores"));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * After a container is preempted (killed by the scheduler), the
 * resource requests it was allocated against must be restored so the
 * application can receive a replacement container.
 */
@Test(timeout = 30000)
public void testRecoverRequestAfterPreemption() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
    RMApp app1 = rm1.submitApp(1024);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    // Ask for one node-local container and wait until it is allocated.
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
    ContainerId containerId1 =
        ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId1, RMContainerState.ALLOCATED);
    RMContainer rmContainer = cs.getRMContainer(containerId1);
    List requests = rmContainer.getResourceRequests();
    FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    FiCaSchedulerNode node = cs.getNode(rmContainer.getAllocatedNode());
    // Once allocated, the node-local request is consumed (null); only
    // the rack-level and ANY requests may still be present.
    for (ResourceRequest request : requests) {
      if (request.getResourceName().equals(node.getRackName())
          || request.getResourceName().equals(ResourceRequest.ANY)) {
        continue;
      }
      Assert.assertNull(
          app.getResourceRequest(request.getPriority(), request.getResourceName()));
    }
    // Preempt the container: all three requests must be recovered.
    cs.killContainer(rmContainer);
    Assert.assertEquals(3, requests.size());
    for (ResourceRequest request : requests) {
      Assert.assertEquals(1,
          app.getResourceRequest(request.getPriority(), request.getResourceName())
              .getNumContainers());
    }
    // A replacement container is allocated and handed back on pull.
    ContainerId containerId2 =
        ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    List containers =
        am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
    // assertEquals reports the actual size on failure, unlike the
    // original assertTrue(containers.size() == 1).
    Assert.assertEquals(1, containers.size());
  } finally {
    // The original leaked the started MockRM; always stop it.
    rm1.stop();
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * moveAllApps to a nonexistent destination queue must throw a
 * YarnException and leave every queue's membership untouched.
 */
@Test
public void testMoveAllAppsInvalidDestination() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId attemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  // Baseline: the attempt lives in a1 / a / root and nowhere under b.
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  try {
    scheduler.moveAllApps("a1", "DOES_NOT_EXIST");
    Assert.fail();
  } catch (YarnException e) {
    // expected: destination queue does not exist
  }
  // The failed move must not have changed any queue's membership.
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  rm.stop();
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getQueueInfo("a") must return queue "a" with its two children, and
 * every queue must appear exactly once in the user ACL info.
 */
@Test
public void testCapacitySchedulerInfo() throws Exception {
  QueueInfo queueInfo =
      resourceManager.getResourceScheduler().getQueueInfo("a", true, true);
  // JUnit's contract is assertEquals(expected, actual); the original had
  // the arguments reversed, producing misleading failure messages.
  Assert.assertEquals("a", queueInfo.getQueueName());
  Assert.assertEquals(2, queueInfo.getChildQueues().size());
  List userACLInfo = resourceManager.getResourceScheduler().getQueueUserAclInfo();
  Assert.assertNotNull(userACLInfo);
  for (QueueUserACLInfo aclInfo : userACLInfo) {
    Assert.assertEquals(1, getQueueCount(userACLInfo, aclInfo.getQueueName()));
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * moveAllApps from a nonexistent source queue must throw a
 * YarnException and leave every queue's membership untouched.
 */
@Test
public void testMoveAllAppsInvalidSource() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId attemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  // Baseline: the attempt lives in a1 / a / root and nowhere under b.
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  try {
    scheduler.moveAllApps("DOES_NOT_EXIST", "b1");
    Assert.fail();
  } catch (YarnException e) {
    // expected: source queue does not exist
  }
  // The failed move must not have changed any queue's membership.
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  rm.stop();
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getAppsInQueue must aggregate over children for parent queues and
 * return null for an unknown queue name.
 */
@Test
public void testGetAppsInQueue() throws Exception {
  Application app0 = new Application("user_0", "a1", resourceManager);
  app0.submit();
  Application app1 = new Application("user_0", "a2", resourceManager);
  app1.submit();
  Application app2 = new Application("user_0", "b2", resourceManager);
  app2.submit();
  ResourceScheduler scheduler = resourceManager.getResourceScheduler();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  // Parent "a" sees the apps of both of its leaves.
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(app0.getApplicationAttemptId()));
  assertTrue(appsInA.contains(app1.getApplicationAttemptId()));
  assertEquals(2, appsInA.size());
  // Root sees every app in the cluster.
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(app0.getApplicationAttemptId()));
  assertTrue(appsInRoot.contains(app1.getApplicationAttemptId()));
  assertTrue(appsInRoot.contains(app2.getApplicationAttemptId()));
  assertEquals(3, appsInRoot.size());
  Assert.assertNull(scheduler.getAppsInQueue("nonexistentqueue"));
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * killAllAppsInQueue on a nonexistent queue must throw a YarnException
 * and leave the existing apps untouched.
 */
@Test
public void testKillAllAppsInvalidSource() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler =
      (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId attemptId =
      rm.getApplicationReport(app.getApplicationId())
          .getCurrentApplicationAttemptId();
  // Baseline: the attempt lives in a1 / a / root.
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  try {
    scheduler.killAllAppsInQueue("DOES_NOT_EXIST");
    Assert.fail();
  } catch (YarnException e) {
    // expected: queue does not exist
  }
  // The failed kill must not have changed any queue's membership.
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(attemptId));
  assertEquals(1, appsInA.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(attemptId));
  assertEquals(1, appsInRoot.size());
  rm.stop();
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestContainerAllocation

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A container token must not exist at allocation time; it is generated
 * only when the AM pulls the container via allocate().
 */
@Test
public void testContainerTokenGeneratedOnPullRequest() throws Exception {
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
  RMApp app1 = rm1.submitApp(200);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
  ContainerId containerId2 =
      ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
  rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
  RMContainer container =
      rm1.getResourceScheduler().getRMContainer(containerId2);
  Assert.assertEquals(containerId2, container.getContainerId());
  // Not pulled yet -> no token on the allocated container.
  Assert.assertNull(container.getContainer().getContainerToken());
  List containers =
      am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
  Assert.assertEquals(containerId2, containers.get(0).getId());
  // Pulled -> token is present.
  Assert.assertNotNull(containers.get(0).getContainerToken());
  rm1.stop();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A 3GB request cannot be satisfied by nm1 (2GB capacity) but must
 * eventually be allocated on nm2 (3GB capacity) — an excess reservation
 * must not starve the application.
 *
 * NOTE(review): timeout=3000000 (50 minutes) looks like a typo for
 * 300000; left unchanged to avoid destabilizing slow CI runs — confirm.
 */
@Test(timeout = 3000000)
public void testExcessReservationThanNodeManagerCapacity() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 2 * GB, 4);
  MockNM nm2 = rm.registerNode("127.0.0.1:2234", 3 * GB, 4);
  nm1.nodeHeartbeat(true);
  nm2.nodeHeartbeat(true);
  // Wait (up to ~2s) for both node managers to register.
  int waitCount = 20;
  int size;
  while ((size = rm.getRMContext().getRMNodes().size()) != 2
      && waitCount-- > 0) {
    LOG.info("Waiting for node managers to register : " + size);
    Thread.sleep(100);
  }
  Assert.assertEquals(2, rm.getRMContext().getRMNodes().size());
  RMApp app1 = rm.submitApp(128);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  LOG.info("sending container requests ");
  am1.addRequests(new String[]{"*"}, 3 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule();
  nm1.nodeHeartbeat(true);
  int waitCounter = 20;
  LOG.info("heartbeating nm1");
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());
  // nm1 only has 2GB, so the 3GB request must NOT be satisfied here.
  // assertEquals reports the actual count on failure, unlike the
  // original assertTrue(... == 0).
  Assert.assertEquals(0, alloc1Response.getAllocatedContainers().size());
  LOG.info("heartbeating nm2");
  waitCounter = 20;
  nm2.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());
  // nm2 has 3GB, so exactly one container is expected.
  Assert.assertEquals(1, alloc1Response.getAllocatedContainers().size());
  rm.stop();
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestParentQueue

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Parsing a queue hierarchy in which queue b and all of its children
 * have capacity 0 must succeed — zero-capacity queues are legal.
 */
@Test
public void testQueueCapacityZero() throws Exception {
  setupMultiLevelQueues(csConf);
  final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B;
  csConf.setCapacity(Q_B, 0);
  csConf.setCapacity(Q_B + "." + B1, 0);
  csConf.setCapacity(Q_B + "." + B2, 0);
  csConf.setCapacity(Q_B + "." + B3, 0);
  final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A;
  csConf.setCapacity(Q_A, 60);
  Map queues = new HashMap();
  try {
    CapacityScheduler.parseQueue(csContext, csConf, null,
        CapacitySchedulerConfiguration.ROOT, queues, queues,
        TestUtils.spyHook);
  } catch (IllegalArgumentException e) {
    fail("Failed to create queues with 0 capacity: " + e);
  }
  // Removed the original tautological assertTrue("...", true): reaching
  // this point without the fail() above already proves parsing succeeded.
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestAllocationFileLoaderService

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Writes a full allocation file, loads it through
 * AllocationFileLoaderService, and checks per-queue min resources,
 * max apps, maxAMShare, ACLs, preemption timeouts, configured queue
 * types, and scheduling policies.
 *
 * NOTE(review): the out.println(...) payload strings appear garbled in
 * this listing — many calls print "" or a bare value, so the XML the
 * test is supposed to write has been stripped by whatever produced this
 * extract; recover the fixture from version control before editing.
 * Also note the min-resource assertion for "root.default" and the
 * 120000ms preemption-timeout assertion for "root.queueA" each appear
 * twice — the second occurrence may have been meant for a different
 * queue; confirm against the original source.
 */
@Test public void testAllocationFileParsing() throws Exception { Configuration conf=new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println("alice,bob admins"); out.println("fair"); out.println(""); out.println(""); out.println("alice,bob admins"); out.println(""); out.println(""); out.println("3"); out.println("0.4"); out.println(""); out.println(""); out.println("60"); out.println(""); out.println(""); out.println(""); out.println(""); out.println(" "); out.println(" "); out.println(""); out.println("15"); out.println("5"); out.println("0.5f"); out.println(""); out.println("10"); out.println(""); out.println("120" + ""); out.println("300"); out.println("drf"); out.println(""); out.close(); allocLoader.init(conf); ReloadListener confHolder=new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf=confHolder.allocConf; assertEquals(6,queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(0),queueConf.getMinResources("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(1024,0),queueConf.getMinResources("root.queueA")); assertEquals(Resources.createResource(2048,0),queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueC")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueD")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueE")); assertEquals(15,queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(15,queueConf.getQueueMaxApps("root.queueA")); assertEquals(15,queueConf.getQueueMaxApps("root.queueB")); assertEquals(15,queueConf.getQueueMaxApps("root.queueC")); assertEquals(3,queueConf.getQueueMaxApps("root.queueD")); assertEquals(15,queueConf.getQueueMaxApps("root.queueE")); assertEquals(10,queueConf.getUserMaxApps("user1")); assertEquals(5,queueConf.getUserMaxApps("user2")); assertEquals(.5f,queueConf.getQueueMaxAMShare("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueA"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueB"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueC"),0.01); assertEquals(.4f,queueConf.getQueueMaxAMShare("root.queueD"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueE"),0.01); assertEquals("*",queueConf.getQueueAcl("root",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals("*",queueConf.getQueueAcl("root",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueB",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueC",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueB")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueC")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(60000,queueConf.getMinSharePreemptionTimeout("root.queueE")); assertEquals(300000,queueConf.getFairSharePreemptionTimeout()); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueF")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueG")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueG.queueH")); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root").getName()); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.queueA").getName()); assertEquals(FairSharePolicy.NAME,queueConf.getSchedulingPolicy("root.queueB").getName()); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.newqueue").getName()); }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Verifies that AllocationFileLoaderService picks up an edited
 * allocation file: after the file is rewritten and the mock clock is
 * advanced past ALLOC_RELOAD_WAIT_MS, the reload listener must observe
 * the new placement rules and queue set.
 *
 * NOTE(review): the out.println(...) strings appear garbled in this
 * listing — the XML they should write has been stripped by whatever
 * produced this extract; recover the fixture from version control
 * before editing.
 */
@Test(timeout=10000) public void testReload() throws Exception { PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(" "); out.println(" 1"); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(""); out.close(); MockClock clock=new MockClock(); Configuration conf=new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(clock); allocLoader.reloadIntervalMs=5; allocLoader.init(conf); ReloadListener confHolder=new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf=confHolder.allocConf; QueuePlacementPolicy policy=allocConf.getPlacementPolicy(); List rules=policy.getRules(); assertEquals(1,rules.size()); assertEquals(QueuePlacementRule.Default.class,rules.get(0).getClass()); assertEquals(1,allocConf.getQueueMaxApps("root.queueA")); assertEquals(2,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueA")); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB")); confHolder.allocConf=null; out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(" "); out.println(" 3"); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(""); out.close(); clock.tick(System.currentTimeMillis() + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000); allocLoader.start(); while (confHolder.allocConf == null) { Thread.sleep(20); } allocConf=confHolder.allocConf; policy=allocConf.getPlacementPolicy(); rules=policy.getRules(); assertEquals(3,rules.size()); assertEquals(QueuePlacementRule.Specified.class,rules.get(0).getClass()); 
// After the reload: 3 rules (specified, nested-user-queue wrapping
// primary-group, default) and only root.queueB remains configured.
assertEquals(QueuePlacementRule.NestedUserQueue.class,rules.get(1).getClass()); assertEquals(QueuePlacementRule.PrimaryGroup.class,((NestedUserQueue)(rules.get(1))).nestedRule.getClass()); assertEquals(QueuePlacementRule.Default.class,rules.get(2).getClass()); assertEquals(3,allocConf.getQueueMaxApps("root.queueB")); assertEquals(1,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB")); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The allocation file may be resolved by name from the classpath when
 * the configured value is not a filesystem path.
 */
@Test
public void testGetAllocationFileFromClasspath() {
  Configuration config = new Configuration();
  config.set(FairSchedulerConfiguration.ALLOCATION_FILE,
      "test-fair-scheduler.xml");
  AllocationFileLoaderService loader = new AllocationFileLoaderService();
  File resolved = loader.getAllocationFile(config);
  assertEquals("test-fair-scheduler.xml", resolved.getName());
  assertTrue(resolved.exists());
}

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFairScheduler

BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that every FairScheduler tunable set in the Configuration before
// init()/start()/reinitialize() is reflected in the scheduler's fields and
// in its min/max/increment resource capabilities.
@Test(timeout=2000) public void testLoadConfigurationOnInitialize() throws IOException { conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true); conf.setInt(FairSchedulerConfiguration.MAX_ASSIGN,3); conf.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,true); conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,.5); conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,.7); conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,true); conf.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,10); conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_RACK_MS,5000); conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_NODE_MS,5000); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,1024); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,512); conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,128); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); Assert.assertEquals(true,scheduler.assignMultiple); Assert.assertEquals(3,scheduler.maxAssign); Assert.assertEquals(true,scheduler.sizeBasedWeight); Assert.assertEquals(.5,scheduler.nodeLocalityThreshold,.01); Assert.assertEquals(.7,scheduler.rackLocalityThreshold,.01); Assert.assertTrue("The continuous scheduling should be enabled",scheduler.continuousSchedulingEnabled); Assert.assertEquals(10,scheduler.continuousSchedulingSleepMs); Assert.assertEquals(5000,scheduler.nodeLocalityDelayMs); Assert.assertEquals(5000,scheduler.rackLocalityDelayMs); Assert.assertEquals(1024,scheduler.getMaximumResourceCapability().getMemory()); Assert.assertEquals(512,scheduler.getMinimumResourceCapability().getMemory()); Assert.assertEquals(128,scheduler.getIncrementResourceCapability().getMemory()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that when a container is preempted (warn, tick past the kill
 * wait, then kill), the ResourceRequests it was satisfying are restored to
 * the application at node-local, rack-local and off-switch levels, and that
 * the app can subsequently be allocated a replacement container.
 *
 * Fix: the final check used Assert.assertTrue(containers.size() == 1), which
 * reports nothing useful on failure; replaced with assertEquals so a failure
 * shows expected vs. actual container counts.
 */
@Test(timeout=5000)
public void testRecoverRequestAfterPreemption() throws Exception {
  conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10);
  MockClock clock = new MockClock();
  scheduler.setClock(clock);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  Priority priority = Priority.newInstance(20);
  String host = "127.0.0.1";
  int GB = 1024;
  // One 16GB/4-core node.
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16 * 1024, 4), 0, host);
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  // Ask at all three locality levels for a single 1GB container.
  List ask = new ArrayList();
  ResourceRequest nodeLocalRequest = createResourceRequest(GB, 1, host, priority.getPriority(), 1, true);
  ResourceRequest rackLocalRequest = createResourceRequest(GB, 1, node.getRackName(), priority.getPriority(), 1, true);
  ResourceRequest offRackRequest = createResourceRequest(GB, 1, ResourceRequest.ANY, priority.getPriority(), 1, true);
  ask.add(nodeLocalRequest);
  ask.add(rackLocalRequest);
  ask.add(offRackRequest);
  ApplicationAttemptId appAttemptId = createSchedulingRequest("queueA", "user1", ask);
  scheduler.update();
  NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeUpdate);
  assertEquals(1, scheduler.getSchedulerApp(appAttemptId).getLiveContainers().size());
  FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
  // Request is consumed once the container is allocated.
  Assert.assertNull(app.getResourceRequest(priority, host));
  ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 1);
  RMContainer rmContainer = app.getRMContainer(containerId1);
  // First call warns; after the kill-wait elapses the second call kills.
  scheduler.warnOrKillContainer(rmContainer);
  clock.tick(5);
  scheduler.warnOrKillContainer(rmContainer);
  // All three locality-level requests must be recovered for the app.
  List requests = rmContainer.getResourceRequests();
  Assert.assertEquals(3, requests.size());
  for (ResourceRequest request : requests) {
    Assert.assertEquals(1, app.getResourceRequest(priority, request.getResourceName()).getNumContainers());
  }
  scheduler.update();
  scheduler.handle(nodeUpdate);
  // The recovered request should produce exactly one replacement container.
  List containers = scheduler.allocate(appAttemptId, Collections.emptyList(), Collections.emptyList(), null, null).getContainers();
  Assert.assertEquals(1, containers.size());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies which containers the FairScheduler chooses to preempt across four
 * equally-weighted queues: preemption first marks containers in the
 * over-share queues' apps, then (after the kill wait) removes the
 * highest-priority-number (least important) containers, and finally stops
 * once all queues are at fair share.
 *
 * NOTE(review): the allocation-file XML written via out.println("") appears
 * gutted by text extraction; literals preserved as-is.
 *
 * Fix: the "App4 should have container to be preempted" assertion was a
 * copy-paste of the app2 check (it tested app2's containers twice); it now
 * tests app4's live vs. preemption containers as the message states.
 */
@Test(timeout=5000)
public void testChoiceOfPreemptedContainers() throws Exception {
  conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL, 5000);
  conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10000);
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE);
  conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false");
  MockClock clock = new MockClock();
  scheduler.setClock(clock);
  // Write the allocation file: four queues, each with fair share .25.
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("");
  out.println("");
  out.println("");
  out.println(".25");
  out.println("");
  out.println("");
  out.println(".25");
  out.println("");
  out.println("");
  out.println(".25");
  out.println("");
  out.println("");
  out.println(".25");
  out.println("");
  out.println("");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Two 4GB/4-core nodes.
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);
  // Two apps per queue in queueA and queueB, two 1GB containers each, at
  // differing priorities so preemption order is observable.
  ApplicationAttemptId app1 = createSchedulingRequest(1 * 1024, 1, "queueA", "user1", 1, 1);
  createSchedulingRequestExistingApplication(1 * 1024, 1, 2, app1);
  ApplicationAttemptId app2 = createSchedulingRequest(1 * 1024, 1, "queueA", "user1", 1, 3);
  createSchedulingRequestExistingApplication(1 * 1024, 1, 4, app2);
  ApplicationAttemptId app3 = createSchedulingRequest(1 * 1024, 1, "queueB", "user1", 1, 1);
  createSchedulingRequestExistingApplication(1 * 1024, 1, 2, app3);
  ApplicationAttemptId app4 = createSchedulingRequest(1 * 1024, 1, "queueB", "user1", 1, 3);
  createSchedulingRequestExistingApplication(1 * 1024, 1, 4, app4);
  scheduler.update();
  scheduler.getQueueManager().getLeafQueue("queueA", true).setPolicy(SchedulingPolicy.parse("fifo"));
  scheduler.getQueueManager().getLeafQueue("queueB", true).setPolicy(SchedulingPolicy.parse("fair"));
  // Drive heartbeats until all 8 containers are placed.
  NodeUpdateSchedulerEvent nodeUpdate1 = new NodeUpdateSchedulerEvent(node1);
  NodeUpdateSchedulerEvent nodeUpdate2 = new NodeUpdateSchedulerEvent(node2);
  for (int i = 0; i < 4; i++) {
    scheduler.handle(nodeUpdate1);
    scheduler.handle(nodeUpdate2);
  }
  assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
  assertEquals(2, scheduler.getSchedulerApp(app2).getLiveContainers().size());
  assertEquals(2, scheduler.getSchedulerApp(app3).getLiveContainers().size());
  assertEquals(2, scheduler.getSchedulerApp(app4).getLiveContainers().size());
  // New demand in queueC/default forces preemption from queueA/queueB.
  createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1);
  createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1);
  createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1);
  createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1);
  scheduler.update();
  // First round only marks containers (kill wait not yet elapsed).
  scheduler.preemptResources(Resources.createResource(2 * 1024));
  assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
  assertEquals(2, scheduler.getSchedulerApp(app3).getLiveContainers().size());
  assertTrue("App2 should have container to be preempted", !Collections.disjoint(scheduler.getSchedulerApp(app2).getLiveContainers(), scheduler.getSchedulerApp(app2).getPreemptionContainers()));
  assertTrue("App4 should have container to be preempted", !Collections.disjoint(scheduler.getSchedulerApp(app4).getLiveContainers(), scheduler.getSchedulerApp(app4).getPreemptionContainers()));
  // After the wait elapses, the marked containers are actually killed.
  clock.tick(15);
  scheduler.preemptResources(Resources.createResource(2 * 1024));
  assertEquals(1, scheduler.getSchedulerApp(app2).getLiveContainers().size());
  assertEquals(1, scheduler.getSchedulerApp(app4).getLiveContainers().size());
  // The priority-4 (least important) containers must be the ones removed.
  Set set = new HashSet();
  for (RMContainer container : scheduler.getSchedulerApp(app2).getLiveContainers()) {
    if (container.getAllocatedPriority().getPriority() == 4) {
      set.add(container);
    }
  }
  for (RMContainer container : scheduler.getSchedulerApp(app4).getLiveContainers()) {
    if (container.getAllocatedPriority().getPriority() == 4) {
      set.add(container);
    }
  }
  assertTrue("Containers with priority=4 in app2 and app4 should be " + "preempted.", set.isEmpty());
  // Second mark-and-kill cycle.
  scheduler.preemptResources(Resources.createResource(2 * 1024));
  clock.tick(15);
  scheduler.preemptResources(Resources.createResource(2 * 1024));
  assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
  assertEquals(0, scheduler.getSchedulerApp(app2).getLiveContainers().size());
  assertEquals(1, scheduler.getSchedulerApp(app3).getLiveContainers().size());
  assertEquals(1, scheduler.getSchedulerApp(app4).getLiveContainers().size());
  // Everyone is at fair share now: no further preemption candidates.
  scheduler.preemptResources(Resources.createResource(2 * 1024));
  assertTrue("App1 should have no container to be preempted", scheduler.getSchedulerApp(app1).getPreemptionContainers().isEmpty());
  assertTrue("App2 should have no container to be preempted", scheduler.getSchedulerApp(app2).getPreemptionContainers().isEmpty());
  assertTrue("App3 should have no container to be preempted", scheduler.getSchedulerApp(app3).getPreemptionContainers().isEmpty());
  assertTrue("App4 should have no container to be preempted", scheduler.getSchedulerApp(app4).getPreemptionContainers().isEmpty());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that an app stuck as non-runnable in a maxApps=0 queue becomes
// runnable when moved to a queue with capacity, and that runnable-app counts
// on the target queue and root are updated.
@Test public void testMoveMakesAppRunnable() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1",0); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); FSAppAttempt app=scheduler.getSchedulerApp(appAttId); assertTrue(oldQueue.getNonRunnableAppSchedulables().contains(app)); scheduler.moveApplication(appAttId.getApplicationId(),"queue2"); assertFalse(oldQueue.getNonRunnableAppSchedulables().contains(app)); assertFalse(targetQueue.getNonRunnableAppSchedulables().contains(app)); assertTrue(targetQueue.getRunnableAppSchedulables().contains(app)); assertEquals(1,targetQueue.getNumRunnableApps()); assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps()); }

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that a continuous-scheduling attempt does not blow up after a node
// has been removed (continuous scheduling itself is disabled by default here;
// continuousSchedulingAttempt() is invoked directly).
@Test public void testContinuousSchedulingWithNodeRemoved() throws Exception { scheduler.init(conf); scheduler.start(); Assert.assertTrue("Continuous scheduling should be disabled.",!scheduler.isContinuousSchedulingEnabled()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); Assert.assertEquals("We should have two alive nodes.",2,scheduler.getNumClusterNodes()); NodeRemovedSchedulerEvent removeNode1=new NodeRemovedSchedulerEvent(node1); scheduler.handle(removeNode1); Assert.assertEquals("We should only have one alive node.",1,scheduler.getNumClusterNodes()); try { scheduler.continuousSchedulingAttempt(); } catch ( Exception e) { fail("Exception happened when doing continuous scheduling. " + e.toString()); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that with continuous scheduling enabled, containers are allocated
 * without node heartbeats: a first 1GB request and a second one at a
 * different priority both get satisfied, landing on two distinct nodes.
 *
 * Fixes: (1) the two wait loops were empty busy-waits pinning a CPU core;
 * they now sleep briefly each iteration (the @Test timeout still bounds
 * them). (2) the cluster-resource assertEquals calls had expected/actual
 * swapped, which produces misleading failure messages.
 */
@Test(timeout=10000)
public void testContinuousScheduling() throws Exception {
  FairScheduler fs = new FairScheduler();
  Configuration conf = createConfiguration();
  conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
  fs.setRMContext(resourceManager.getRMContext());
  fs.init(conf);
  fs.start();
  fs.reinitialize(conf, resourceManager.getRMContext());
  Assert.assertTrue("Continuous scheduling should be enabled.", fs.isContinuousSchedulingEnabled());
  // Two 8GB/8-core nodes.
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  fs.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  fs.handle(nodeEvent2);
  Assert.assertEquals(16 * 1024, fs.getClusterResource().getMemory());
  Assert.assertEquals(16, fs.getClusterResource().getVirtualCores());
  ApplicationAttemptId appAttemptId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  fs.addApplication(appAttemptId.getApplicationId(), "queue11", "user11", false);
  fs.addApplicationAttempt(appAttemptId, false, false);
  // First request: one 1GB/1-core container anywhere.
  List ask = new ArrayList();
  ResourceRequest request = createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
  ask.add(request);
  fs.allocate(appAttemptId, ask, new ArrayList(), null, null);
  Thread.sleep(fs.getConf().getContinuousSchedulingSleepMs() + 500);
  FSAppAttempt app = fs.getSchedulerApp(appAttemptId);
  // Wait (bounded by @Test timeout) for the continuous-scheduling thread to
  // allocate; sleep to avoid a busy-wait.
  while (app.getCurrentConsumption().equals(Resources.none())) {
    Thread.sleep(10);
  }
  Assert.assertEquals(1024, app.getCurrentConsumption().getMemory());
  Assert.assertEquals(1, app.getCurrentConsumption().getVirtualCores());
  // Second request at a different priority.
  request = createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
  ask.clear();
  ask.add(request);
  fs.allocate(appAttemptId, ask, new ArrayList(), null, null);
  while (app.getCurrentConsumption().equals(Resources.createResource(1024, 1))) {
    Thread.sleep(10);
  }
  Assert.assertEquals(2048, app.getCurrentConsumption().getMemory());
  Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores());
  // The two containers should have been spread across both nodes.
  Set nodes = new HashSet();
  Iterator it = app.getLiveContainers().iterator();
  while (it.hasNext()) {
    nodes.add(it.next().getContainer().getNodeId());
  }
  Assert.assertEquals(2, nodes.size());
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies getAppsInQueue() resolves both short ("queue1.subqueue1") and
// fully-qualified ("root.queue1.subqueue1") names, and that a parent queue
// returns the apps of all its leaves.
@Test public void testGetAppsInQueue() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId appAttId1=createSchedulingRequest(1024,1,"queue1.subqueue1","user1"); ApplicationAttemptId appAttId2=createSchedulingRequest(1024,1,"queue1.subqueue2","user1"); ApplicationAttemptId appAttId3=createSchedulingRequest(1024,1,"default","user1"); List apps=scheduler.getAppsInQueue("queue1.subqueue1"); assertEquals(1,apps.size()); assertEquals(appAttId1,apps.get(0)); apps=scheduler.getAppsInQueue("root.queue1.subqueue1"); assertEquals(1,apps.size()); assertEquals(appAttId1,apps.get(0)); apps=scheduler.getAppsInQueue("user1"); assertEquals(1,apps.size()); assertEquals(appAttId3,apps.get(0)); apps=scheduler.getAppsInQueue("root.user1"); assertEquals(1,apps.size()); assertEquals(appAttId3,apps.get(0)); apps=scheduler.getAppsInQueue("queue1"); Assert.assertEquals(2,apps.size()); Set appAttIds=Sets.newHashSet(apps.get(0),apps.get(1)); assertTrue(appAttIds.contains(appAttId1)); assertTrue(appAttIds.contains(appAttId2)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies resToPreempt(): starved queues (queueC/queueD) claim nothing before
// the min-share timeout, 1024MB after the min-share timeout, and 1536MB once
// the fair-share timeout has also elapsed.
// NOTE(review): the out.println("")/out.print("5")/out.print("10") literals
// appear emptied by text extraction (the min-share/fair-share-timeout XML tags
// are missing) -- confirm against the upstream test source.
@Test(timeout=5000) public void testPreemptionDecision() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); MockClock clock=new MockClock(); scheduler.setClock(clock); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("0mb,0vcores"); out.println(""); out.println(""); out.println(".25"); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println(".25"); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println(".25"); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println(".25"); out.println("1024mb,0vcores"); out.println(""); out.print("5"); out.print("10"); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); RMNode node3=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),3,"127.0.0.3"); NodeAddedSchedulerEvent nodeEvent3=new NodeAddedSchedulerEvent(node3); scheduler.handle(nodeEvent3); ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,"queueA","user1",1,1); ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,"queueA","user1",1,2); ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,"queueA","user1",1,3); ApplicationAttemptId app4=createSchedulingRequest(1 * 1024,"queueB","user1",1,1); ApplicationAttemptId app5=createSchedulingRequest(1 * 1024,"queueB","user1",1,2); ApplicationAttemptId app6=createSchedulingRequest(1 * 1024,"queueB","user1",1,3); scheduler.update(); for (int i=0; i < 2; i++) { 
NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1); scheduler.handle(nodeUpdate1); NodeUpdateSchedulerEvent nodeUpdate2=new NodeUpdateSchedulerEvent(node2); scheduler.handle(nodeUpdate2); NodeUpdateSchedulerEvent nodeUpdate3=new NodeUpdateSchedulerEvent(node3); scheduler.handle(nodeUpdate3); } ApplicationAttemptId app7=createSchedulingRequest(1 * 1024,"queueC","user1",1,1); ApplicationAttemptId app8=createSchedulingRequest(1 * 1024,"queueC","user1",1,2); ApplicationAttemptId app9=createSchedulingRequest(1 * 1024,"queueC","user1",1,3); ApplicationAttemptId app10=createSchedulingRequest(1 * 1024,"queueD","user1",1,1); ApplicationAttemptId app11=createSchedulingRequest(1 * 1024,"queueD","user1",1,2); ApplicationAttemptId app12=createSchedulingRequest(1 * 1024,"queueD","user1",1,3); scheduler.update(); FSLeafQueue schedC=scheduler.getQueueManager().getLeafQueue("queueC",true); FSLeafQueue schedD=scheduler.getQueueManager().getLeafQueue("queueD",true); assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedC,clock.getTime()))); assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedD,clock.getTime()))); clock.tick(6); assertEquals(1024,scheduler.resToPreempt(schedC,clock.getTime()).getMemory()); assertEquals(1024,scheduler.resToPreempt(schedD,clock.getTime()).getMemory()); scheduler.update(); clock.tick(6); assertEquals(1536,scheduler.resToPreempt(schedC,clock.getTime()).getMemory()); assertEquals(1536,scheduler.resToPreempt(schedD,clock.getTime()).getMemory()); }

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Verifies that FairScheduler.serviceInit rejects configurations where the
 * minimum allocation exceeds the maximum, both for memory and for vcores,
 * by throwing a YarnRuntimeException with the expected message prefix.
 */
@Test(timeout=30000)
public void testConfValidation() throws Exception {
  FairScheduler fs = new FairScheduler();

  // min memory > max memory must be rejected.
  Configuration memConf = new YarnConfiguration();
  memConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  memConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  try {
    fs.serviceInit(memConf);
    fail("Exception is expected because the min memory allocation is larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler memory"));
  }

  // min vcores > max vcores must be rejected.
  Configuration vcoreConf = new YarnConfiguration();
  vcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 2);
  vcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 1);
  try {
    fs.serviceInit(vcoreConf);
    fail("Exception is expected because the min vcores allocation is larger than the max vcores allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler vcores"));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies blacklist additions/removals via allocate(): a blacklisted host
// receives no containers for the app, and removing it from the blacklist
// allows allocation again.
@SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); final int GB=1024; String host="127.0.0.1"; RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * GB,16),0,host); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId appAttemptId=createSchedulingRequest(GB,"root.default","user",1); FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId); scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null); assertTrue(app.isBlacklisted(host)); scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host)); assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host)); List update=Arrays.asList(createResourceRequest(GB,node.getHostName(),1,0,true)); scheduler.allocate(appAttemptId,update,Collections.emptyList(),Collections.singletonList(host),null); assertTrue(app.isBlacklisted(host)); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",0,app.getLiveContainers().size()); scheduler.allocate(appAttemptId,update,Collections.emptyList(),null,Collections.singletonList(host)); assertFalse(app.isBlacklisted(host)); createSchedulingRequest(GB,"root.default","user",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",1,app.getLiveContainers().size()); }

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
// Verifies that moving an app into a queue whose maxResources it would exceed
// is rejected: the final moveApplication() must throw YarnException (declared
// via the @Test expected attribute).
@Test(expected=YarnException.class) public void testMoveWouldViolateMaxResourcesConstraints() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); queueMgr.getLeafQueue("queue2",true); scheduler.getAllocationConfiguration().maxQueueResources.put("root.queue2",Resource.newInstance(1024,1)); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(2048,2)); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); scheduler.handle(updateEvent); assertEquals(Resource.newInstance(2048,2),oldQueue.getResourceUsage()); scheduler.moveApplication(appAttId.getApplicationId(),"queue2"); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies hierarchical allocation-file parsing: queueA, queueB.queueC,
// queueB.queueD and default all materialize as leaf queues (4 total).
// NOTE(review): the out.println("") literals appear emptied by text
// extraction (queue XML missing); also the 4-leaf-queue size assertion is
// duplicated at the end -- harmless but redundant.
@Test public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAXException, AllocationConfigurationException, ParserConfigurationException { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); Collection leafQueues=queueManager.getLeafQueues(); Assert.assertEquals(4,leafQueues.size()); Assert.assertNotNull(queueManager.getLeafQueue("queueA",false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC",false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD",false)); Assert.assertNotNull(queueManager.getLeafQueue("default",false)); Assert.assertEquals(4,leafQueues.size()); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that the scheduler's update and continuous-scheduling threads are
// alive after start() and terminate (within ~5s of polling) after stop().
@Test public void testThreadLifeCycle() throws InterruptedException { conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,true); scheduler.init(conf); scheduler.start(); Thread updateThread=scheduler.updateThread; Thread schedulingThread=scheduler.schedulingThread; assertTrue(updateThread.isAlive()); assertTrue(schedulingThread.isAlive()); scheduler.stop(); int numRetries=100; while (numRetries-- > 0 && (updateThread.isAlive() || schedulingThread.isAlive())) { Thread.sleep(50); } assertNotEquals("One of the threads is still alive",0,numRetries); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies leaf/parent name conflicts: once "parent.child" exists, "parent"
// cannot become a leaf, nor can "parent.child.grandchild" (child is a leaf),
// while a sibling leaf "parent.sister" is still creatable.
@Test public void testHierarchicalQueuesSimilarParents() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); FSLeafQueue leafQueue=queueManager.getLeafQueue("parent.child",true); Assert.assertEquals(2,queueManager.getLeafQueues().size()); Assert.assertNotNull(leafQueue); Assert.assertEquals("root.parent.child",leafQueue.getName()); FSLeafQueue leafQueue2=queueManager.getLeafQueue("parent",true); Assert.assertNull(leafQueue2); Assert.assertEquals(2,queueManager.getLeafQueues().size()); FSLeafQueue leafQueue3=queueManager.getLeafQueue("parent.child.grandchild",true); Assert.assertNull(leafQueue3); Assert.assertEquals(2,queueManager.getLeafQueues().size()); FSLeafQueue leafQueue4=queueManager.getLeafQueue("parent.sister",true); Assert.assertNotNull(leafQueue4); Assert.assertEquals("root.parent.sister",leafQueue4.getName()); Assert.assertEquals(3,queueManager.getLeafQueues().size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Verifies moving a runnable app between queues: resource usage, demand,
// runnable-app membership and counts all transfer from the old queue to the
// target queue (root's runnable count stays 1).
@Test public void testMoveRunnableApp() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); ApplicationId appId=appAttId.getApplicationId(); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(1024)); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); assertEquals(Resource.newInstance(1024,1),oldQueue.getResourceUsage()); scheduler.update(); assertEquals(Resource.newInstance(3072,3),oldQueue.getDemand()); scheduler.moveApplication(appId,"queue2"); FSAppAttempt app=scheduler.getSchedulerApp(appAttId); assertSame(targetQueue,app.getQueue()); assertFalse(oldQueue.getRunnableAppSchedulables().contains(app)); assertTrue(targetQueue.getRunnableAppSchedulables().contains(app)); assertEquals(Resource.newInstance(0,0),oldQueue.getResourceUsage()); assertEquals(Resource.newInstance(1024,1),targetQueue.getResourceUsage()); assertEquals(0,oldQueue.getNumRunnableApps()); assertEquals(1,targetQueue.getNumRunnableApps()); assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps()); scheduler.update(); assertEquals(Resource.newInstance(0,0),oldQueue.getDemand()); assertEquals(Resource.newInstance(3072,3),targetQueue.getDemand()); }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Verifies that an explicitly configured root queue takes effect: root gets
// the drf policy (DominantResourceFairnessPolicy) and its configured children
// child1/child2 exist as leaf queues.
// NOTE(review): out.println("") literals appear emptied by text extraction
// (the root-queue XML is missing) -- confirm against the upstream test source.
@Test public void testConfigureRootQueue() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println("fair"); out.println(""); out.println(" drf"); out.println(" "); out.println(" 1024mb,1vcores"); out.println(" "); out.println(" "); out.println(" 1024mb,4vcores"); out.println(" "); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); FSQueue root=queueManager.getRootQueue(); assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy); assertNotNull(queueManager.getLeafQueue("child1",false)); assertNotNull(queueManager.getLeafQueue("child2",false)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies that an app submitted with an empty queue name is rejected: no new
// queue is created, the attempt is not registered, and no RMApp is recorded.
@Test public void testEmptyQueueName() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); assertEquals(1,scheduler.getQueueManager().getLeafQueues().size()); ApplicationAttemptId appAttemptId=createAppAttemptId(1,1); AppAddedSchedulerEvent appAddedEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"","user1"); scheduler.handle(appAddedEvent); assertEquals(1,scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId)); assertEquals(0,resourceManager.getRMContext().getRMApps().size()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestQueueManager

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Verifies reload semantics when a leaf queue is reconfigured as a parent:
// while the leaf is non-empty the conversion is deferred (it stays a leaf);
// once empty, reload converts it to a childless parent queue.
@Test public void testReloadTurnsLeafToParentWithNoLeaf(){ AllocationConfiguration allocConf=new AllocationConfiguration(conf); allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.queue1"); queueManager.updateAllocationConfiguration(allocConf); assertNotNull(queueManager.getLeafQueue("root.queue1",false)); notEmptyQueues.add(queueManager.getLeafQueue("root.queue1",false)); allocConf=new AllocationConfiguration(conf); allocConf.configuredQueues.get(FSQueueType.PARENT).add("root.queue1"); queueManager.updateAllocationConfiguration(allocConf); assertNotNull(queueManager.getLeafQueue("root.queue1",false)); assertNull(queueManager.getParentQueue("root.queue1",false)); notEmptyQueues.clear(); queueManager.updateAllocationConfiguration(allocConf); assertNull(queueManager.getLeafQueue("root.queue1",false)); assertNotNull(queueManager.getParentQueue("root.queue1",false)); assertTrue(queueManager.getParentQueue("root.queue1",false).getChildQueues().isEmpty()); }

Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.TestFifoScheduler

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies FifoScheduler.getAppsInQueue(): both submitted apps appear under
// "default", and an unknown queue name yields null.
@Test public void testGetAppsInQueue() throws Exception { Application application_0=new Application("user_0",resourceManager); application_0.submit(); Application application_1=new Application("user_0",resourceManager); application_1.submit(); ResourceScheduler scheduler=resourceManager.getResourceScheduler(); List appsInDefault=scheduler.getAppsInQueue("default"); assertTrue(appsInDefault.contains(application_0.getApplicationAttemptId())); assertTrue(appsInDefault.contains(application_1.getApplicationAttemptId())); assertEquals(2,appsInDefault.size()); Assert.assertNull(scheduler.getAppsInQueue("someotherqueue")); }

Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestAMRMTokens

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Flow: register AM with the old AMRM token, wait out the rolling interval so
// the master key changes, confirm the old token still works during rollover,
// then (after activation) confirm a token from the new key works and that the
// old token is finally rejected (the empty catch at the end is intentional --
// the failure is the expected outcome).
// NOTE(review): the inner "final Configuration conf=rm.getConfig();" shadows
// the test-class conf field used two lines earlier -- apparently deliberate,
// but worth confirming.
/** * Validate master-key-roll-over and that tokens are usable even after * master-key-roll-over. * @throws Exception */ @Test public void testMasterKeyRollOver() throws Exception { conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec); conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms); MyContainerManager containerManager=new MyContainerManager(); final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager); rm.start(); Long startTime=System.currentTimeMillis(); final Configuration conf=rm.getConfig(); final YarnRPC rpc=YarnRPC.create(conf); ApplicationMasterProtocol rmClient=null; AMRMTokenSecretManager appTokenSecretManager=rm.getRMContext().getAMRMTokenSecretManager(); MasterKeyData oldKey=appTokenSecretManager.getMasterKey(); Assert.assertNotNull(oldKey); try { MockNM nm1=rm.registerNode("localhost:1234",5120); RMApp app=rm.submitApp(1024); nm1.nodeHeartbeat(true); int waitCount=0; while (containerManager.containerTokens == null && waitCount++ < maxWaitAttempts) { LOG.info("Waiting for AM Launch to happen.."); Thread.sleep(1000); } Assert.assertNotNull(containerManager.containerTokens); RMAppAttempt attempt=app.getCurrentAppAttempt(); ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId(); UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString()); Credentials credentials=containerManager.getContainerCredentials(); final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress(); Token amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens()); currentUser.addToken(amRMToken); rmClient=createRMClient(rm,conf,rpc,currentUser); RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class); rmClient.registerApplicationMaster(request); AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class); 
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null); while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) { rmClient.allocate(allocateRequest); Thread.sleep(500); } MasterKeyData newKey=appTokenSecretManager.getMasterKey(); Assert.assertNotNull(newKey); Assert.assertFalse("Master key should have changed!",oldKey.equals(newKey)); rpc.stopProxy(rmClient,conf); rmClient=createRMClient(rm,conf,rpc,currentUser); Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null); waitCount=0; while (waitCount++ <= maxWaitAttempts) { if (appTokenSecretManager.getCurrnetMasterKeyData() != oldKey) { break; } try { rmClient.allocate(allocateRequest); } catch ( Exception ex) { break; } Thread.sleep(200); } Assert.assertTrue(appTokenSecretManager.getCurrnetMasterKeyData().equals(newKey)); Assert.assertTrue(appTokenSecretManager.getMasterKey().equals(newKey)); Assert.assertTrue(appTokenSecretManager.getNextMasterKeyData() == null); Token newToken=appTokenSecretManager.createAndGetAMRMToken(applicationAttemptId); SecurityUtil.setTokenService(newToken,rmBindAddress); currentUser.addToken(newToken); rpc.stopProxy(rmClient,conf); rmClient=createRMClient(rm,conf,rpc,currentUser); allocateRequest=Records.newRecord(AllocateRequest.class); Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null); rpc.stopProxy(rmClient,conf); try { currentUser.addToken(amRMToken); rmClient=createRMClient(rm,conf,rpc,currentUser); allocateRequest=Records.newRecord(AllocateRequest.class); Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null); Assert.fail("The old Token should not work"); } catch ( Exception ex) { } } finally { rm.stop(); if (rmClient != null) { rpc.stopProxy(rmClient,conf); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// AMRM token roll-over as observed by a running AM:
//  - before rollMasterKey: the allocate response carries no AMRMToken
//  - after rollMasterKey: exactly one allocate response carries the new
//    token, whose key id matches the secret manager's current master key
//  - subsequent allocates, and allocates after activateNextMasterKey,
//    carry no token again
// NOTE(review): Assert.assertEquals is called here as (actual, expected),
// reversed from JUnit's (expected, actual) convention — the equality check
// is unaffected, only a failure message would read backwards.
@Test(timeout=20000) public void testAMRMMasterKeysUpdate() throws Exception { MockRM rm=new MockRM(conf){ @Override protected void doSecureLogin() throws IOException { } } ; rm.start(); MockNM nm=rm.registerNode("127.0.0.1:1234",8000); RMApp app=rm.submitApp(200); MockAM am=MockRM.launchAndRegisterAM(app,rm,nm); AllocateResponse response=am.allocate(Records.newRecord(AllocateRequest.class)); Assert.assertNull(response.getAMRMToken()); rm.getRMContext().getAMRMTokenSecretManager().rollMasterKey(); response=am.allocate(Records.newRecord(AllocateRequest.class)); Assert.assertNotNull(response.getAMRMToken()); Token amrmToken=ConverterUtils.convertFromYarn(response.getAMRMToken(),new Text(response.getAMRMToken().getService())); Assert.assertEquals(amrmToken.decodeIdentifier().getKeyId(),rm.getRMContext().getAMRMTokenSecretManager().getMasterKey().getMasterKey().getKeyId()); response=am.allocate(Records.newRecord(AllocateRequest.class)); Assert.assertNull(response.getAMRMToken()); rm.getRMContext().getAMRMTokenSecretManager().activateNextMasterKey(); response=am.allocate(Records.newRecord(AllocateRequest.class)); Assert.assertNull(response.getAMRMToken()); rm.stop(); }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Validate that application tokens are unusable after the
 * application-finishes.
 *
 * Registers an AM, finishes it, reports the AM container complete, waits
 * for the attempt to reach FINISHED, then verifies that a fresh client
 * proxy built from the same AMRM token can no longer allocate.
 * @throws Exception
 */
@SuppressWarnings("unchecked") @Test
public void testTokenExpiry() throws Exception {
  MyContainerManager containerManager = new MyContainerManager();
  final MockRMWithAMS rm = new MockRMWithAMS(conf, containerManager);
  rm.start();
  // Deliberately shadows the test-class conf: use the RM's effective
  // configuration for all RPC client work below.
  final Configuration conf = rm.getConfig();
  final YarnRPC rpc = YarnRPC.create(conf);
  ApplicationMasterProtocol rmClient = null;
  try {
    MockNM nm1 = rm.registerNode("localhost:1234", 5120);
    RMApp app = rm.submitApp(1024);
    nm1.nodeHeartbeat(true);
    int waitCount = 0;
    // Consistency fix: bound the wait with the shared maxWaitAttempts
    // field (was a hard-coded 20), matching the identical launch-wait
    // loop in testMasterKeyRollOver.
    while (containerManager.containerTokens == null
        && waitCount++ < maxWaitAttempts) {
      LOG.info("Waiting for AM Launch to happen..");
      Thread.sleep(1000);
    }
    Assert.assertNotNull(containerManager.containerTokens);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
    UserGroupInformation currentUser =
        UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
    Credentials credentials = containerManager.getContainerCredentials();
    final InetSocketAddress rmBindAddress =
        rm.getApplicationMasterService().getBindAddress();
    Token amRMToken = MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
        credentials.getAllTokens());
    currentUser.addToken(amRMToken);
    rmClient = createRMClient(rm, conf, rpc, currentUser);
    // Register the AM and immediately finish it.
    RegisterApplicationMasterRequest request =
        Records.newRecord(RegisterApplicationMasterRequest.class);
    rmClient.registerApplicationMaster(request);
    FinishApplicationMasterRequest finishAMRequest =
        Records.newRecord(FinishApplicationMasterRequest.class);
    finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
    finishAMRequest.setDiagnostics("diagnostics");
    finishAMRequest.setTrackingUrl("url");
    rmClient.finishApplicationMaster(finishAMRequest);
    // Report the AM container as complete so the attempt can transition
    // to FINISHED.
    ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
        attempt.getMasterContainer().getId(), ContainerState.COMPLETE,
        "AM Container Finished", 0);
    rm.getRMContext().getDispatcher().getEventHandler().handle(
        new RMAppAttemptContainerFinishedEvent(applicationAttemptId,
            containerStatus));
    int count = 0;
    while (attempt.getState() != RMAppAttemptState.FINISHED
        && count < maxWaitAttempts) {
      Thread.sleep(100);
      count++;
    }
    Assert.assertTrue(attempt.getState() == RMAppAttemptState.FINISHED);
    // A fresh proxy under the now-expired app token must be rejected.
    rpc.stopProxy(rmClient, conf);
    rmClient = createRMClient(rm, conf, rpc, currentUser);
    AllocateRequest allocateRequest =
        Records.newRecord(AllocateRequest.class);
    try {
      rmClient.allocate(allocateRequest);
      Assert.fail("You got to be kidding me! "
          + "Using App tokens after app-finish should fail!");
    } catch (Throwable t) {
      LOG.info("Exception found is ", t);
      Assert.assertTrue(t.getCause().getMessage().contains(
          applicationAttemptId.toString()
              + " not found in AMRMTokenSecretManager."));
    }
  } finally {
    rm.stop();
    if (rmClient != null) {
      rpc.stopProxy(rmClient, conf);
    }
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestClientToAMTokens

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/*
 * End-to-end check of client-to-AM tokens: submits an app with a mocked
 * ContainerManagementProtocol, registers the AM under the attempt's UGI,
 * reads the client-to-AM token out of the application report, and then
 * verifies (a) an unauthenticated RPC ping to the AM fails, and (b) a
 * tampered token id / user name is rejected while the genuine token works
 * (verifyTokenWithTamperedID / verifyTokenWithTamperedUserName /
 * verifyValidToken).
 * Review fix: PrivilegedAction is parameterized with
 * RegisterApplicationMasterResponse — as a raw type the doAs(...) result
 * erases to Object and cannot be assigned to the typed local.
 */
@Test public void testClientToAMTokens() throws Exception { final Configuration conf=new Configuration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); ContainerManagementProtocol containerManager=mock(ContainerManagementProtocol.class); StartContainersResponse mockResponse=mock(StartContainersResponse.class); when(containerManager.startContainers((StartContainersRequest)any())).thenReturn(mockResponse); final DrainDispatcher dispatcher=new DrainDispatcher(); MockRM rm=new MockRMWithCustomAMLauncher(conf,containerManager){ protected ClientRMService createClientRMService(){ return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,getRMContext().getRMDelegationTokenSecretManager()); } @Override protected Dispatcher createDispatcher(){ return dispatcher; } @Override protected void doSecureLogin() throws IOException { } } ; rm.start(); RMApp app=rm.submitApp(1024); MockNM nm1=rm.registerNode("localhost:1234",3072); nm1.nodeHeartbeat(true); dispatcher.await(); nm1.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttempt=app.getCurrentAppAttempt().getAppAttemptId(); final MockAM mockAM=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),app.getCurrentAppAttempt().getAppAttemptId()); UserGroupInformation appUgi=UserGroupInformation.createRemoteUser(appAttempt.toString()); RegisterApplicationMasterResponse response=appUgi.doAs(new PrivilegedAction<RegisterApplicationMasterResponse>(){ @Override public RegisterApplicationMasterResponse run(){ RegisterApplicationMasterResponse response=null; try { response=mockAM.registerAppAttempt(); } catch ( Exception e) { Assert.fail("Exception was not expected"); } return response; } } ); GetApplicationReportRequest request=Records.newRecord(GetApplicationReportRequest.class); request.setApplicationId(app.getApplicationId()); GetApplicationReportResponse 
reportResponse=rm.getClientRMService().getApplicationReport(request); ApplicationReport appReport=reportResponse.getApplicationReport(); org.apache.hadoop.yarn.api.records.Token originalClientToAMToken=appReport.getClientToAMToken(); Assert.assertNotNull(response.getClientToAMTokenMasterKey()); Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0); ApplicationAttemptId appAttemptId=app.getAppAttempts().keySet().iterator().next(); Assert.assertNotNull(appAttemptId); final CustomAM am=new CustomAM(appAttemptId,response.getClientToAMTokenMasterKey().array()); am.init(conf); am.start(); SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo()); try { CustomProtocol client=(CustomProtocol)RPC.getProxy(CustomProtocol.class,1L,am.address,conf); client.ping(); fail("Access by unauthenticated user should fail!!"); } catch ( Exception e) { Assert.assertFalse(am.pinged); } Token token=ConverterUtils.convertFromYarn(originalClientToAMToken,am.address); verifyTokenWithTamperedID(conf,am,token); verifyTokenWithTamperedUserName(conf,am,token); verifyValidToken(conf,am,token); }

Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * App submission whose ContainerLaunchContext tokens buffer is garbage
 * ("BOGUS") must be rejected by ClientRMService with a YarnException
 * complaining about a bad header in token storage.
 */
@Test(timeout=20000)
public void testAppSubmissionWithInvalidDelegationToken() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  MockRM rm = new MockRM(conf);
  // Not a serialized Credentials object — parsing must fail on the header.
  ByteBuffer tokens = ByteBuffer.wrap("BOGUS".getBytes());
  ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(
      new HashMap(), new HashMap(), new ArrayList(), new HashMap(), tokens,
      new HashMap());
  ApplicationSubmissionContext appSubContext =
      ApplicationSubmissionContext.newInstance(
          ApplicationId.newInstance(1234121, 0), "BOGUS", "default",
          Priority.UNDEFINED, amContainer, false, true, 1,
          Resource.newInstance(1024, 1), "BOGUS");
  SubmitApplicationRequest request =
      SubmitApplicationRequest.newInstance(appSubContext);
  try {
    rm.getClientRMService().submitApplication(request);
    fail("Error was expected."); // fixed message typo: was "excepted"
  } catch (YarnException e) {
    Assert.assertTrue(
        e.getMessage().contains("Bad header found in token storage"));
  }
}

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * Synchronous app submission must fail when a managed token's renewal
 * throws: addApplicationSync propagates an IOException that names the
 * token and chains the original cause ("boom").
 */
@Test(timeout=20000)
public void testDTRonAppSubmission()
    throws IOException, InterruptedException, BrokenBarrierException {
  final Credentials credsx = new Credentials();
  final Token tokenx = mock(Token.class);
  credsx.addToken(new Text("token"), tokenx);
  // A managed token whose renew() always blows up.
  doReturn(true).when(tokenx).isManaged();
  doThrow(new IOException("boom")).when(tokenx)
      .renew(any(Configuration.class));
  final DelegationTokenRenewer dtr =
      createNewDelegationTokenRenewer(conf, counter);
  RMContext mockContext = mock(RMContext.class);
  ClientRMService mockClientRMService = mock(ClientRMService.class);
  when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
  InetSocketAddress sockAddr =
      InetSocketAddress.createUnresolved("localhost", 1234);
  when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
  dtr.setRMContext(mockContext);
  when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
  dtr.init(conf);
  dtr.start();
  try {
    dtr.addApplicationSync(mock(ApplicationId.class), credsx, false);
    fail("Catch IOException on app submission");
  } catch (IOException e) {
    Assert.assertTrue(e.getMessage().contains(tokenx.toString()));
    Assert.assertTrue(e.getCause().toString().contains("boom"));
  } finally {
    // Previously leaked: the started renewer service was never shut down,
    // leaving its renewal thread running across tests.
    dtr.stop();
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Submits an application whose credentials hold an already-cancelled
// delegation token (token.cancelToken() before addApplicationAsync) and
// polls the shared eventQueue — up to 20 rounds with 500 ms sleeps on an
// empty queue — for an APP_REJECTED event carrying the same ApplicationId.
// Returning early on that event is success; running out of attempts fails
// the test.
@Test(timeout=60000) public void testAppRejectionWithCancelledDelegationToken() throws Exception { MyFS dfs=(MyFS)FileSystem.get(conf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode()); MyToken token=dfs.getDelegationToken("user1"); token.cancelToken(); Credentials ts=new Credentials(); ts.addToken(token.getKind(),token); ApplicationId appId=BuilderUtils.newApplicationId(0,0); delegationTokenRenewer.addApplicationAsync(appId,ts,true); int waitCnt=20; while (waitCnt-- > 0) { if (!eventQueue.isEmpty()) { Event evt=eventQueue.take(); if (evt.getType() == RMAppEventType.APP_REJECTED) { Assert.assertTrue(((RMAppEvent)evt).getApplicationId().equals(appId)); return; } } else { Thread.sleep(500); } } fail("App submission with a cancelled token should have failed"); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Renewal scheduling (steps in the javadoc below): token1 is flagged via
// Renewer.tokenToRenewIn2Sec to renew after 2 s, so once the bounded wait
// loop settles, the renewal counter must equal 3 initial renewals + 1
// extra, with token1 the most recently renewed. token4 is then registered
// and its application finished immediately; after a 6 s sleep the counter
// must be unchanged (the token was cancelled before its 2 s renewal) and
// renewing the cancelled token4 must raise InvalidToken.
// NOTE(review): assertEquals("most recently renewed token mismatch", ...)
// passes Renewer.lastRenewed as the "expected" argument — reversed from
// JUnit's (expected, actual) convention; the equality check is unaffected.
/** * Basic idea of the test: * 1. create tokens. * 2. Mark one of them to be renewed in 2 seconds (instead of * 24 hours) * 3. register them for renewal * 4. sleep for 3 seconds * 5. count number of renewals (should 3 initial ones + one extra) * 6. register another token for 2 seconds * 7. cancel it immediately * 8. Sleep and check that the 2 seconds renew didn't happen * (totally 5 renewals) * 9. check cancellation * @throws IOException * @throws URISyntaxException */ @Test(timeout=60000) public void testDTRenewal() throws Exception { MyFS dfs=(MyFS)FileSystem.get(conf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode()); MyToken token1, token2, token3; token1=dfs.getDelegationToken("user1"); token2=dfs.getDelegationToken("user2"); token3=dfs.getDelegationToken("user3"); Renewer.tokenToRenewIn2Sec=token1; LOG.info("token=" + token1 + " should be renewed for 2 secs"); String nn1=DelegationTokenRenewer.SCHEME + "://host1:0"; String nn2=DelegationTokenRenewer.SCHEME + "://host2:0"; String nn3=DelegationTokenRenewer.SCHEME + "://host3:0"; Credentials ts=new Credentials(); ts.addToken(new Text(nn1),token1); ts.addToken(new Text(nn2),token2); ts.addToken(new Text(nn3),token3); ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0); delegationTokenRenewer.addApplicationAsync(applicationId_0,ts,true); waitForEventsToGetProcessed(delegationTokenRenewer); int numberOfExpectedRenewals=3 + 1; int attempts=10; while (attempts-- > 0) { try { Thread.sleep(3 * 1000); } catch ( InterruptedException e) { } if (Renewer.counter == numberOfExpectedRenewals) break; } LOG.info("dfs=" + dfs.hashCode() + ";Counter = "+ Renewer.counter+ ";t="+ Renewer.lastRenewed); assertEquals("renew wasn't called as many times as expected(4):",numberOfExpectedRenewals,Renewer.counter); assertEquals("most recently renewed token mismatch",Renewer.lastRenewed,token1); ts=new Credentials(); MyToken token4=dfs.getDelegationToken("user4"); Renewer.tokenToRenewIn2Sec=token4; 
LOG.info("token=" + token4 + " should be renewed for 2 secs"); String nn4=DelegationTokenRenewer.SCHEME + "://host4:0"; ts.addToken(new Text(nn4),token4); ApplicationId applicationId_1=BuilderUtils.newApplicationId(0,1); delegationTokenRenewer.addApplicationAsync(applicationId_1,ts,true); waitForEventsToGetProcessed(delegationTokenRenewer); delegationTokenRenewer.applicationFinished(applicationId_1); waitForEventsToGetProcessed(delegationTokenRenewer); numberOfExpectedRenewals=Renewer.counter; try { Thread.sleep(6 * 1000); } catch ( InterruptedException e) { } LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed); assertEquals("renew wasn't called as many times as expected",numberOfExpectedRenewals,Renewer.counter); try { token4.renew(conf); fail("Renewal of cancelled token should have failed"); } catch ( InvalidToken ite) { } }

APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// Token keep-alive (steps in the javadoc below): log aggregation enabled,
// NM expiry 6 s, delayed token-removal scan 1 s. After
// applicationFinished() the token must still renew (inside the keep-alive
// window), and after a 10 s sleep renewal must fail with InvalidToken
// because the token has been cancelled.
// NOTE(review): Assert.assertEquals(evt.getType(), RMAppEventType.START)
// has its expected/actual arguments reversed from JUnit convention — the
// check itself is unaffected.
// NOTE(review): localDtr is started but never stopped in this test —
// confirm whether a finally { localDtr.stop(); } is wanted.
/** * Basic idea of the test: * 0. Setup token KEEP_ALIVE * 1. create tokens. * 2. register them for renewal - to be cancelled on app complete * 3. Complete app. * 4. Verify token is alive within the KEEP_ALIVE time * 5. Verify token has been cancelled after the KEEP_ALIVE_TIME * @throws IOException * @throws URISyntaxException */ @Test(timeout=60000) public void testDTKeepAlive1() throws Exception { Configuration lconf=new Configuration(conf); lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true); lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,6000l); lconf.setLong(YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,1000l); DelegationTokenRenewer localDtr=createNewDelegationTokenRenewer(lconf,counter); RMContext mockContext=mock(RMContext.class); ClientRMService mockClientRMService=mock(ClientRMService.class); when(mockContext.getClientRMService()).thenReturn(mockClientRMService); when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); when(mockContext.getDispatcher()).thenReturn(dispatcher); InetSocketAddress sockAddr=InetSocketAddress.createUnresolved("localhost",1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); localDtr.setRMContext(mockContext); localDtr.init(lconf); localDtr.start(); MyFS dfs=(MyFS)FileSystem.get(lconf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ lconf.hashCode()); Credentials ts=new Credentials(); MyToken token1=dfs.getDelegationToken("user1"); String nn1=DelegationTokenRenewer.SCHEME + "://host1:0"; ts.addToken(new Text(nn1),token1); ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0); localDtr.addApplicationAsync(applicationId_0,ts,true); waitForEventsToGetProcessed(localDtr); if (!eventQueue.isEmpty()) { Event evt=eventQueue.take(); if (evt instanceof RMAppEvent) { Assert.assertEquals(((RMAppEvent)evt).getType(),RMAppEventType.START); } else { fail("RMAppEvent.START was expected!!"); } } localDtr.applicationFinished(applicationId_0); 
waitForEventsToGetProcessed(localDtr); token1.renew(lconf); Thread.sleep(10000l); try { token1.renew(lconf); fail("Renewal of cancelled token should have failed"); } catch ( InvalidToken ite) { } }

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServices

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requesting the cluster root with an unsupported Accept type (text/plain)
 * must surface INTERNAL_SERVER_ERROR and produce no response body.
 */
@Test
public void testInvalidAccept() throws JSONException, Exception {
  WebResource webResource = resource();
  String body = "";
  try {
    body = webResource.path("ws").path("v1").path("cluster")
        .accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse errorResponse = expected.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR,
        errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A request to the servlet root (no ws/v1/cluster path at all) must come
 * back NOT_FOUND with an empty error body.
 */
@Test
public void testInvalidUri2() throws JSONException, Exception {
  WebResource webResource = resource();
  String body = "";
  try {
    body = webResource.accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse errorResponse = expected.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A request below the cluster root to an unknown resource ("bogus") must
 * come back NOT_FOUND with an empty error body.
 */
@Test
public void testInvalidUri() throws JSONException, Exception {
  WebResource webResource = resource();
  String body = "";
  try {
    body = webResource.path("ws").path("v1").path("cluster").path("bogus")
        .accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse errorResponse = expected.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch(
        "error string exists and shouldn't", "", body);
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesApps

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// /apps filtered by state: one app is left ACCEPTED and one is killed.
// With states=ACCEPTED only the accepted app is returned; with
// states=ACCEPTED,KILLED both apps are returned, accepted/killed in either
// array order.
@Test public void testAppsQueryStates() throws JSONException, Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); rm.submitApp(CONTAINER_MB); RMApp killedApp=rm.submitApp(CONTAINER_MB); rm.killApp(killedApp.getApplicationId()); amNodeManager.nodeHeartbeat(true); WebResource r=resource(); MultivaluedMapImpl params=new MultivaluedMapImpl(); params.add("states",YarnApplicationState.ACCEPTED.toString()); ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); JSONArray array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",1,array.length()); assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state")); r=resource(); params=new MultivaluedMapImpl(); params.add("states",YarnApplicationState.ACCEPTED.toString()); params.add("states",YarnApplicationState.KILLED.toString()); response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",2,array.length()); assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || 
(array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED"))); rm.stop(); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// /apps/{appid} for a well-formed but nonexistent id
// (application_00000_0099) must return 404 NOT_FOUND whose RemoteException
// JSON body carries exactly three fields and names NotFoundException with
// the "app with id ... not found" message.
@Test public void testNonexistApp() throws JSONException, Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); rm.submitApp(CONTAINER_MB,"testwordcount","user1"); amNodeManager.nodeHeartbeat(true); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid appid"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } finally { rm.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// /apps?finalStatus=INVALID_test must return 400 BAD_REQUEST with an
// IllegalArgumentException RemoteException whose message mentions the bad
// FinalApplicationStatus constant.
@Test public void testAppsQueryFinalStatusInvalid() throws JSONException, Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); rm.submitApp(CONTAINER_MB); amNodeManager.nodeHeartbeat(true); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("apps").queryParam("finalStatus","INVALID_test").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid state query"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringContains("exception message","org.apache.hadoop.yarn.api.records.FinalApplicationStatus.INVALID_test",message); WebServicesTestUtils.checkStringMatch("exception type","IllegalArgumentException",type); WebServicesTestUtils.checkStringMatch("exception classname","java.lang.IllegalArgumentException",classname); } finally { rm.stop(); } }

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the /appstatistics endpoint: unfiltered, filtered by state,
 * filtered by applicationType, rejected multi-type filter, combined
 * state+type filter, and an invalid state value.
 *
 * Fix: the per-state verification loops previously read
 * statItems.getJSONObject(0) on every iteration, so only the first stat
 * item was ever checked; they now index with the loop variable.
 */
@Test
public void testAppStatistics() throws JSONException, Exception {
  try {
    rm.start();
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 4096);
    Thread.sleep(1);
    // One MAPREDUCE app driven to FINISHED; one more MAPREDUCE and one
    // OTHER app left in ACCEPTED.
    RMApp app1 = rm.submitApp(CONTAINER_MB, "",
        UserGroupInformation.getCurrentUser().getShortUserName(), null,
        false, null, 2, null, "MAPREDUCE");
    amNodeManager.nodeHeartbeat(true);
    MockAM am = rm.sendAMLaunched(
        app1.getCurrentAppAttempt().getAppAttemptId());
    am.registerAppAttempt();
    am.unregisterAppAttempt();
    amNodeManager.nodeHeartbeat(
        app1.getCurrentAppAttempt().getAppAttemptId(), 1,
        ContainerState.COMPLETE);
    rm.submitApp(CONTAINER_MB, "",
        UserGroupInformation.getCurrentUser().getShortUserName(), null,
        false, null, 2, null, "MAPREDUCE");
    rm.submitApp(CONTAINER_MB, "",
        UserGroupInformation.getCurrentUser().getShortUserName(), null,
        false, null, 2, null, "OTHER");

    // Query 1: no filters — one stat item per YarnApplicationState.
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics").accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject appsStatInfo = json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements", 1, appsStatInfo.length());
    JSONArray statItems = appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",
        YarnApplicationState.values().length, statItems.length());
    for (int i = 0; i < YarnApplicationState.values().length; ++i) {
      // was getJSONObject(0): check every item, not just the first
      JSONObject item = statItems.getJSONObject(i);
      assertEquals("*", item.getString("type"));
      if (item.getString("state").equals("ACCEPTED")) {
        assertEquals("2", item.getString("count"));
      } else if (item.getString("state").equals("FINISHED")) {
        assertEquals("1", item.getString("count"));
      } else {
        assertEquals("0", item.getString("count"));
      }
    }

    // Query 2: states=ACCEPTED — exactly one stat item.
    r = resource();
    response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics")
        .queryParam("states", YarnApplicationState.ACCEPTED.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    appsStatInfo = json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements", 1, appsStatInfo.length());
    statItems = appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements", 1, statItems.length());
    assertEquals("ACCEPTED", statItems.getJSONObject(0).getString("state"));
    assertEquals("*", statItems.getJSONObject(0).getString("type"));
    assertEquals("2", statItems.getJSONObject(0).getString("count"));

    // Query 3: applicationTypes=MAPREDUCE — one item per state again.
    r = resource();
    response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics").queryParam("applicationTypes", "MAPREDUCE")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    appsStatInfo = json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements", 1, appsStatInfo.length());
    statItems = appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",
        YarnApplicationState.values().length, statItems.length());
    for (int i = 0; i < YarnApplicationState.values().length; ++i) {
      // was getJSONObject(0): check every item, not just the first
      JSONObject item = statItems.getJSONObject(i);
      assertEquals("mapreduce", item.getString("type"));
      if (item.getString("state").equals("ACCEPTED")) {
        assertEquals("1", item.getString("count"));
      } else if (item.getString("state").equals("FINISHED")) {
        assertEquals("1", item.getString("count"));
      } else {
        assertEquals("0", item.getString("count"));
      }
    }

    // Query 4: two applicationTypes — rejected with BAD_REQUEST.
    r = resource();
    response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics")
        .queryParam("applicationTypes", "MAPREDUCE,OTHER")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject exception = json.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String className = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringContains("exception message",
        "we temporarily support at most one applicationType", message);
    WebServicesTestUtils.checkStringEqual("exception type",
        "BadRequestException", type);
    WebServicesTestUtils.checkStringEqual("exception className",
        "org.apache.hadoop.yarn.webapp.BadRequestException", className);

    // Query 5: states=FINISHED,ACCEPTED combined with type MAPREDUCE —
    // exactly two items, one per state, in either order.
    r = resource();
    response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics")
        .queryParam("states", YarnApplicationState.FINISHED.toString()
            + "," + YarnApplicationState.ACCEPTED.toString())
        .queryParam("applicationTypes", "MAPREDUCE")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    appsStatInfo = json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements", 1, appsStatInfo.length());
    statItems = appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements", 2, statItems.length());
    JSONObject statItem1 = statItems.getJSONObject(0);
    JSONObject statItem2 = statItems.getJSONObject(1);
    assertTrue((statItem1.getString("state").equals("ACCEPTED")
        && statItem2.getString("state").equals("FINISHED"))
        || (statItem2.getString("state").equals("ACCEPTED")
            && statItem1.getString("state").equals("FINISHED")));
    assertEquals("mapreduce", statItem1.getString("type"));
    assertEquals("1", statItem1.getString("count"));
    assertEquals("mapreduce", statItem2.getString("type"));
    assertEquals("1", statItem2.getString("count"));

    // Query 6: unknown state value — rejected with BAD_REQUEST.
    r = resource();
    response = r.path("ws").path("v1").path("cluster")
        .path("appstatistics").queryParam("states", "wrong_state")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    exception = json.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    message = exception.getString("message");
    type = exception.getString("exception");
    className = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringContains("exception message",
        "Invalid application-state wrong_state", message);
    WebServicesTestUtils.checkStringEqual("exception type",
        "BadRequestException", type);
    WebServicesTestUtils.checkStringEqual("exception className",
        "org.apache.hadoop.yarn.webapp.BadRequestException", className);
  } finally {
    rm.stop();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// /apps/{appid} with a malformed id (application_invalid_12) must return
// 400 BAD_REQUEST carrying a java.lang.NumberFormatException
// RemoteException for the unparsable "invalid" segment.
@Test public void testInvalidApp() throws JSONException, Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); rm.submitApp(CONTAINER_MB); amNodeManager.nodeHeartbeat(true); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("apps").path("application_invalid_12").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid appid"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","For input string: \"invalid\"",message); WebServicesTestUtils.checkStringMatch("exception type","NumberFormatException",type); WebServicesTestUtils.checkStringMatch("exception classname","java.lang.NumberFormatException",classname); } finally { rm.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Queries the apps collection with an unrecognized value for the "states"
 * parameter and verifies a 400 BAD_REQUEST / BadRequestException response.
 */
@Test public void testAppsQueryStatesInvalid() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("states", "INVALID_test")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid state query");
  } catch (UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject body = errorResponse.getEntity(JSONObject.class);
    JSONObject remoteException = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteException.length());
    String message = remoteException.getString("message");
    String type = remoteException.getString("exception");
    String classname = remoteException.getString("javaClassName");
    // Message embeds the offending state value; only check containment.
    WebServicesTestUtils.checkStringContains("exception message",
        "Invalid application-state INVALID_test", message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "BadRequestException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
  } finally {
    rm.stop();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Looks up an application whose id contains a non-numeric component and
 * expects 400 BAD_REQUEST with a NumberFormatException RemoteException body.
 *
 * NOTE(review): this body is byte-identical to testInvalidApp and never hits
 * an app-attempt endpoint -- presumably it was meant to request
 * apps/{appid}/appattempts; confirm against the upstream test class.
 */
@Test public void testInvalidAppAttempts() throws JSONException, Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  rm.submitApp(CONTAINER_MB);
  amNodeManager.nodeHeartbeat(true);
  WebResource r=resource();
  try {
    // "invalid" cannot be parsed as the cluster-timestamp part of the id.
    r.path("ws").path("v1").path("cluster").path("apps").path("application_invalid_12").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid appid");
  } catch ( UniformInterfaceException ue) {
    ClientResponse response=ue.getResponse();
    assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    JSONObject msg=response.getEntity(JSONObject.class);
    JSONObject exception=msg.getJSONObject("RemoteException");
    // message / exception / javaClassName are the three expected fields.
    assertEquals("incorrect number of elements",3,exception.length());
    String message=exception.getString("message");
    String type=exception.getString("exception");
    String classname=exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message","For input string: \"invalid\"",message);
    WebServicesTestUtils.checkStringMatch("exception type","NumberFormatException",type);
    WebServicesTestUtils.checkStringMatch("exception classname","java.lang.NumberFormatException",classname);
  } finally {
    rm.stop();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the apps collection's "applicationTypes" query parameter against
 * three submitted apps (types YARN, MAPREDUCE, NON-YARN): a single value,
 * repeated parameters, comma-separated lists, empty values, and values padded
 * with stray commas and blanks.
 *
 * NOTE(review): rm.stop() is not inside a finally block, so a failing
 * assertion leaves the RM running; sibling tests use try/finally -- confirm
 * whether this was intentional.
 */
@Test public void testAppsQueryAppTypes() throws JSONException, Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  Thread.sleep(1);
  // App 1: default application type; drive its AM through registration,
  // unregistration, and AM-container completion.
  RMApp app1=rm.submitApp(CONTAINER_MB);
  amNodeManager.nodeHeartbeat(true);
  MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
  // Apps 2 and 3 carry explicit application types.
  rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
  rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN");
  // Case 1: single type -> only the MAPREDUCE app.
  WebResource r=resource();
  ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  JSONObject apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  JSONArray array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",1,array.length());
  assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
  // Case 2: repeated parameter -> union of YARN and MAPREDUCE (either order).
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",2,array.length());
  assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
  // Case 3: comma-separated list -> union of YARN and NON-YARN.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",2,array.length());
  assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
  // Case 4: empty value -> no filtering, all three apps returned.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",3,array.length());
  // Case 5: list plus repeated parameter covering every type -> all three.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",3,array.length());
  // Case 6: an empty repeated value is ignored -> only the YARN app.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",1,array.length());
  assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
  // Case 7: stray commas/blanks around the value are trimmed away.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, YARN ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",1,array.length());
  assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
  // Case 8: only commas/blanks -> treated as no filter, all three apps.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",3,array.length());
  // Case 9: padded comma list -> union of YARN and NON-YARN.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN, ,NON-YARN, ,,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",2,array.length());
  assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
  // Case 10: two padded repeated parameters -> union of YARN and MAPREDUCE.
  r=resource();
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes"," YARN, , ,,,").queryParam("applicationTypes","MAPREDUCE , ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",2,array.length());
  assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
  rm.stop();
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a syntactically valid but unknown application id and verifies the
 * web services answer 404 NOT_FOUND with a NotFoundException body.
 */
@Test public void testNonexistAppAttempts() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
  nm.nodeHeartbeat(true);
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("cluster").path("apps")
        .path("application_00000_0099")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid appid");
  } catch (UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject body = errorResponse.getEntity(JSONObject.class);
    JSONObject remoteException = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteException.length());
    String message = remoteException.getString("message");
    String type = remoteException.getString("exception");
    String classname = remoteException.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: app with id: application_00000_0099 not found",
        message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  } finally {
    rm.stop();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Drives an application through every permitted AM attempt until the whole
 * app fails, then verifies via testAppAttemptsHelper that the web services
 * report one attempt entry per launched attempt.
 *
 * Fix: rm.stop() now runs in a finally block so the RM is shut down even
 * when an assertion or a state wait fails mid-test; this matches the
 * cleanup style of the sibling tests in this class.
 */
@Test(timeout=20000) public void testMultipleAppAttempts() throws JSONException, Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 8192);
    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
    MockAM am = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
    int maxAppAttempts = rm.getConfig().getInt(
        YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    // The loop below only terminates meaningfully with more than one attempt.
    assertTrue(maxAppAttempts > 1);
    int numAttempt = 1;
    while (true) {
      // Fail the current attempt by reporting its AM container complete.
      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
          ContainerState.COMPLETE);
      am.waitForState(RMAppAttemptState.FAILED);
      if (numAttempt == maxAppAttempts) {
        // Last permitted attempt: the whole application must fail.
        rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);
        break;
      }
      // Otherwise the app is re-accepted and a new attempt is launched.
      rm.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
      am = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
      numAttempt++;
    }
    assertEquals("incorrect number of attempts", maxAppAttempts,
        app1.getAppAttempts().values().size());
    testAppAttemptsHelper(app1.getApplicationId().toString(), app1,
        MediaType.APPLICATION_JSON);
  } finally {
    rm.stop();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Queries the apps collection with an unrecognized value for the singular
 * "state" parameter and verifies a 400 BAD_REQUEST / BadRequestException.
 */
@Test public void testAppsQueryStateInvalid() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("state", "INVALID_test")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid state query");
  } catch (UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject body = errorResponse.getEntity(JSONObject.class);
    JSONObject remoteException = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteException.length());
    String message = remoteException.getString("message");
    String type = remoteException.getString("exception");
    String classname = remoteException.getString("javaClassName");
    WebServicesTestUtils.checkStringContains("exception message",
        "Invalid application-state INVALID_test", message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "BadRequestException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
  } finally {
    rm.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Filters the apps collection by "states": first a single ACCEPTED value,
 * then a comma-separated "ACCEPTED,KILLED" list, verifying the result set
 * matches (one ACCEPTED app; then both an ACCEPTED and a KILLED app).
 *
 * NOTE(review): rm.stop() is not inside a finally block; a failing assertion
 * leaves the RM running, unlike the sibling tests in this class.
 */
@Test public void testAppsQueryStatesComma() throws JSONException, Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  rm.submitApp(CONTAINER_MB);
  // A second app, immediately killed, so both target states exist.
  RMApp killedApp=rm.submitApp(CONTAINER_MB);
  rm.killApp(killedApp.getApplicationId());
  amNodeManager.nodeHeartbeat(true);
  // Query 1: states=ACCEPTED -> exactly one app, in ACCEPTED state.
  WebResource r=resource();
  MultivaluedMapImpl params=new MultivaluedMapImpl();
  params.add("states",YarnApplicationState.ACCEPTED.toString());
  ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  JSONObject apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  JSONArray array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",1,array.length());
  assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
  // Query 2: states=ACCEPTED,KILLED -> both apps, one in each state.
  r=resource();
  params=new MultivaluedMapImpl();
  params.add("states",YarnApplicationState.ACCEPTED.toString() + "," + YarnApplicationState.KILLED.toString());
  response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  apps=json.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,apps.length());
  array=apps.getJSONArray("app");
  assertEquals("incorrect number of elements",2,array.length());
  // Order in the response is unspecified; accept either permutation.
  assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
  rm.stop();
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesAppsModification

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Kills a single application through the REST state resource, for every
 * combination of accept media type (JSON/XML) and request content type.
 * Flow per combination: submit an app, PUT state=KILLED, expect 202 ACCEPTED
 * (or 401 when authentication is disabled), follow the Location header, then
 * poll with repeated PUTs until the server answers 200 OK with KILLED.
 *
 * NOTE(review): rm.stop() is not in a finally block; a mid-loop failure
 * leaves the RM running. The trailing "return;" is redundant.
 */
@Test(timeout=90000) public void testSingleAppKill() throws Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
  MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE};
  for ( String mediaType : mediaTypes) {
    for ( MediaType contentType : contentTypes) {
      RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
      amNodeManager.nodeHeartbeat(true);
      // Initial GET of the state resource (response value unused here).
      ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).get(ClientResponse.class);
      AppState targetState=new AppState(YarnApplicationState.KILLED.toString());
      // Marshal the target state to match the request content type.
      Object entity;
      if (contentType == MediaType.APPLICATION_JSON_TYPE) {
        entity=appStateToJSON(targetState);
      } else {
        entity=targetState;
      }
      response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class);
      if (!isAuthenticationEnabled()) {
        // Without auth the kill must be rejected; nothing more to check.
        assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
        continue;
      }
      assertEquals(Status.ACCEPTED,response.getClientResponseStatus());
      if (mediaType == MediaType.APPLICATION_JSON) {
        verifyAppStateJson(response,RMAppState.KILLING,RMAppState.ACCEPTED);
      } else {
        verifyAppStateXML(response,RMAppState.KILLING,RMAppState.ACCEPTED);
      }
      // The 202 response points back at the state resource via Location.
      String locationHeaderValue=response.getHeaders().getFirst(HttpHeaders.LOCATION);
      Client c=Client.create();
      WebResource tmp=c.resource(locationHeaderValue);
      if (isAuthenticationEnabled()) {
        tmp=tmp.queryParam("user.name",webserviceUserName);
      }
      response=tmp.get(ClientResponse.class);
      assertEquals(Status.OK,response.getClientResponseStatus());
      assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/" + app.getApplicationId().toString() + "/state"));
      // Poll: keep PUTting the same kill request until 200 OK / KILLED.
      while (true) {
        Thread.sleep(100);
        response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).entity(entity,contentType).put(ClientResponse.class);
        assertTrue((response.getClientResponseStatus() == Status.ACCEPTED) || (response.getClientResponseStatus() == Status.OK));
        if (response.getClientResponseStatus() == Status.OK) {
          assertEquals(RMAppState.KILLED,app.getState());
          if (mediaType == MediaType.APPLICATION_JSON) {
            verifyAppStateJson(response,RMAppState.KILLED);
          } else {
            verifyAppStateXML(response,RMAppState.KILLED);
          }
          break;
        }
      }
    }
  }
  rm.stop();
  return;
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Submits apps for two users into queue b1 and verifies the scheduler JSON
 * exposes a per-user entry (username, app counts, resourcesUsed) for each.
 */
@Test public void testPerUserResourcesJSON() throws Exception {
  rm.start();
  try {
    rm.submitApp(10, "app1", "user1", null, "b1");
    rm.submitApp(20, "app2", "user2", null, "b1");
    WebResource webResource = resource();
    ClientResponse schedulerResponse = webResource
        .path("ws").path("v1").path("cluster").path("scheduler/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, schedulerResponse.getType());
    JSONObject payload = schedulerResponse.getEntity(JSONObject.class);
    JSONObject schedulerInfo =
        payload.getJSONObject("scheduler").getJSONObject("schedulerInfo");
    // Navigate root -> b -> b1 and pull its per-user array.
    JSONObject queueB1 = getSubQueue(getSubQueue(schedulerInfo, "b"), "b1");
    JSONArray userEntries = queueB1.getJSONObject("users").getJSONArray("user");
    for (int idx = 0; idx < 2; ++idx) {
      JSONObject userEntry = userEntries.getJSONObject(idx);
      String username = userEntry.getString("username");
      assertTrue("User isn't user1 or user2",
          username.equals("user1") || username.equals("user2"));
      // Presence checks: getInt throws if the field is missing.
      userEntry.getInt("numActiveApplications");
      userEntry.getInt("numPendingApplications");
      checkResourcesUsed(userEntry);
    }
  } finally {
    rm.stop();
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test per-user resources and resourcesUsed elements in the web services XML.
 * Submits apps for two users into queue b1, fetches the scheduler XML, and
 * checks that only b1's <users> element has children (one per user, each with
 * username, resourcesUsed, and app counters), while every <resourcesUsed>
 * element in the document carries numeric memory and vCores fields.
 * @throws Exception on any request/parse failure
 */
@Test public void testPerUserResourcesXML() throws Exception {
  rm.start();
  try {
    rm.submitApp(10,"app1","user1",null,"b1");
    rm.submitApp(20,"app2","user2",null,"b1");
    WebResource r=resource();
    ClientResponse response=r.path("ws/v1/cluster/scheduler").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
    String xml=response.getEntity(String.class);
    DocumentBuilder db=DocumentBuilderFactory.newInstance().newDocumentBuilder();
    InputSource is=new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom=db.parse(is);
    // Every queue has a <users> element; only b1's should be populated.
    NodeList allUsers=dom.getElementsByTagName("users");
    for (int i=0; i < allUsers.getLength(); ++i) {
      Node perUserResources=allUsers.item(i);
      String queueName=getChildNodeByName(perUserResources.getParentNode(),"queueName").getTextContent();
      if (queueName.equals("b1")) {
        // Two submitting users -> two child <user> nodes.
        assertEquals(2,perUserResources.getChildNodes().getLength());
        NodeList users=perUserResources.getChildNodes();
        for (int j=0; j < users.getLength(); ++j) {
          Node user=users.item(j);
          String username=getChildNodeByName(user,"username").getTextContent();
          assertTrue(username.equals("user1") || username.equals("user2"));
          // parseInt doubles as a "field exists and is numeric" assertion.
          Integer.parseInt(getChildNodeByName(getChildNodeByName(user,"resourcesUsed"),"memory").getTextContent());
          Integer.parseInt(getChildNodeByName(user,"numActiveApplications").getTextContent());
          Integer.parseInt(getChildNodeByName(user,"numPendingApplications").getTextContent());
        }
      } else {
        assertEquals(0,perUserResources.getChildNodes().getLength());
      }
    }
    // All resourcesUsed elements (queue- and user-level) must be numeric.
    NodeList allResourcesUsed=dom.getElementsByTagName("resourcesUsed");
    for (int i=0; i < allResourcesUsed.getLength(); ++i) {
      Node resourcesUsed=allResourcesUsed.item(i);
      Integer.parseInt(getChildNodeByName(resourcesUsed,"memory").getTextContent());
      Integer.parseInt(getChildNodeByName(resourcesUsed,"vCores").getTextContent());
    }
  } finally {
    rm.stop();
  }
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesDelegationTokenAuthentication

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a delegation token which has been explicitly cancelled can no
 * longer be used to authenticate: an app-submission POST carrying the
 * cancelled token must be rejected with 403 FORBIDDEN.
 *
 * Fixes: the failure message previously said "expired delegation tokens"
 * although this test cancels the token (expiry is a different code path);
 * also dropped a redundant trailing return statement.
 */
@Test public void testCancelledDelegationToken() throws Exception {
  String token = getDelegationToken("client");
  cancelDelegationToken(token);
  // Minimal submission context; authentication is rejected before the
  // application id is ever validated.
  ApplicationSubmissionContextInfo app = new ApplicationSubmissionContextInfo();
  String appid = "application_123_0";
  app.setApplicationId(appid);
  String requestBody = getMarshalledAppInfo(app);
  URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader, token);
  setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);
  try {
    conn.getInputStream();
    fail("Authentication should fail with cancelled delegation tokens");
  } catch (IOException e) {
    assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the delegation-token management operations themselves
 * (create, renew, cancel) cannot be authenticated with a delegation token:
 * every such request must come back 403 FORBIDDEN.
 */
@Test public void testDelegationTokenOps() throws Exception {
  String token = getDelegationToken("client");
  String createRequest = "{\"renewer\":\"test\"}";
  String renewRequest = "{\"token\": \"" + token + "\"}";
  // Create and renew share the same endpoint and the same expectation.
  String[] requests = {createRequest, renewRequest};
  for (String requestBody : requests) {
    URL endpoint = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
    HttpURLConnection connection = (HttpURLConnection) endpoint.openConnection();
    connection.setRequestProperty(DelegationTokenHeader, token);
    setupConn(connection, "POST", MediaType.APPLICATION_JSON, requestBody);
    try {
      connection.getInputStream();
      fail("Creation/Renewing delegation tokens should not be allowed with token auth");
    } catch (IOException e) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), connection.getResponseCode());
    }
  }
  // Cancellation uses DELETE and carries the token in both headers.
  URL endpoint = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
  HttpURLConnection connection = (HttpURLConnection) endpoint.openConnection();
  connection.setRequestProperty(DelegationTokenHeader, token);
  connection.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER, token);
  setupConn(connection, "DELETE", null, null);
  try {
    connection.getInputStream();
    fail("Cancelling delegation tokens should not be allowed with token auth");
  } catch (IOException e) {
    assertEquals(Status.FORBIDDEN.getStatusCode(), connection.getResponseCode());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies delegation-token authentication on app submission: a POST without
 * any credentials is rejected with 401, while the same POST carrying a valid
 * delegation token succeeds, creates the application, and attributes it to
 * the token's owner ("client" -- presumably the Kerberos client principal
 * behind getDelegationToken; confirm against the test setup).
 */
@Test public void testDelegationTokenAuth() throws Exception {
  final String token=getDelegationToken("test");
  ApplicationSubmissionContextInfo app=new ApplicationSubmissionContextInfo();
  String appid="application_123_0";
  app.setApplicationId(appid);
  String requestBody=getMarshalledAppInfo(app);
  URL url=new URL("http://localhost:8088/ws/v1/cluster/apps");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  // First attempt: no token header -> must be 401 UNAUTHORIZED.
  setupConn(conn,"POST","application/xml",requestBody);
  try {
    conn.getInputStream();
    fail("we should not be here");
  } catch ( IOException e) {
    assertEquals(Status.UNAUTHORIZED.getStatusCode(),conn.getResponseCode());
  }
  // Second attempt: same request with the delegation token -> accepted.
  conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader,token);
  setupConn(conn,"POST",MediaType.APPLICATION_XML,requestBody);
  conn.getInputStream();
  boolean appExists=rm.getRMContext().getRMApps().containsKey(ConverterUtils.toApplicationId(appid));
  assertTrue(appExists);
  RMApp actualApp=rm.getRMContext().getRMApps().get(ConverterUtils.toApplicationId(appid));
  String owner=actualApp.getUser();
  // Ownership comes from the token, not from the request body.
  assertEquals("client",owner);
  return;
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesDelegationTokens

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end renewal tests for RM delegation tokens over the REST API, run
 * for every accept/content media-type combination:
 *  - the token owner ("client") obtains a token but may NOT renew it (403);
 *  - the designated renewer ("client2") renews twice and the expiration time
 *    strictly increases each time;
 *  - an unrelated principal ("client3") is refused renewal (403);
 *  - a renewal request whose body carries a bogus token yields 400.
 * When Kerberos auth is off, delegates to verifySimpleAuthRenew instead.
 *
 * NOTE(review): client() gets the LoggingFilter added twice (before and
 * after rm.start()) -- likely an accidental duplicate. In the final Callable
 * the XML branch builds body="" + token + ""; the XML element markup appears
 * to have been stripped by extraction -- verify against the upstream source.
 * rm.stop() is also not in a finally block here.
 */
@Test public void testRenewDelegationToken() throws Exception {
  client().addFilter(new LoggingFilter(System.out));
  rm.start();
  final String renewer="client2";
  this.client().addFilter(new LoggingFilter(System.out));
  // Token request payload naming "client2" as the only allowed renewer.
  final DelegationToken dummyToken=new DelegationToken();
  dummyToken.setRenewer(renewer);
  String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
  for ( final String mediaType : mediaTypes) {
    for ( final String contentType : mediaTypes) {
      if (isKerberosAuth == false) {
        verifySimpleAuthRenew(mediaType,contentType);
        continue;
      }
      // Owner obtains a token, then tries (and fails) to renew it himself.
      final DelegationToken responseToken=KerberosTestUtils.doAsClient(new Callable(){
        @Override public DelegationToken call() throws Exception {
          ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dummyToken,mediaType).post(ClientResponse.class);
          assertEquals(Status.OK,response.getClientResponseStatus());
          DelegationToken tok=getDelegationTokenFromResponse(response);
          assertFalse(tok.getToken().isEmpty());
          String body=generateRenewTokenBody(mediaType,tok.getToken());
          response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,tok.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
          // Owner is not the renewer -> renewal forbidden.
          assertEquals(Status.FORBIDDEN,response.getClientResponseStatus());
          return tok;
        }
      });
      // The designated renewer renews twice; expiration must advance.
      KerberosTestUtils.doAs(renewer,new Callable(){
        @Override public DelegationToken call() throws Exception {
          long oldExpirationTime=Time.now();
          assertValidRMToken(responseToken.getToken());
          String body=generateRenewTokenBody(mediaType,responseToken.getToken());
          ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
          assertEquals(Status.OK,response.getClientResponseStatus());
          DelegationToken tok=getDelegationTokenFromResponse(response);
          String message="Expiration time not as expected: old = " + oldExpirationTime + "; new = " + tok.getNextExpirationTime();
          assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime);
          oldExpirationTime=tok.getNextExpirationTime();
          // Sleep so the second renewal lands on a later wall-clock time.
          Thread.sleep(1000);
          response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
          assertEquals(Status.OK,response.getClientResponseStatus());
          tok=getDelegationTokenFromResponse(response);
          message="Expiration time not as expected: old = " + oldExpirationTime + "; new = " + tok.getNextExpirationTime();
          assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime);
          return tok;
        }
      });
      // A third party may not renew someone else's token.
      KerberosTestUtils.doAs("client3",new Callable(){
        @Override public DelegationToken call() throws Exception {
          String body=generateRenewTokenBody(mediaType,responseToken.getToken());
          ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class);
          assertEquals(Status.FORBIDDEN,response.getClientResponseStatus());
          return null;
        }
      });
      // A renewal body carrying a garbage token string is a bad request.
      KerberosTestUtils.doAsClient(new Callable(){
        @Override public Void call() throws Exception {
          String token="TEST_TOKEN_STRING";
          String body="";
          if (mediaType.equals(MediaType.APPLICATION_JSON)) {
            body="{\"token\": \"" + token + "\" }";
          } else {
            body="" + token + "";
          }
          ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").accept(contentType).entity(body,mediaType).post(ClientResponse.class);
          assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
          return null;
        }
      });
    }
  }
  rm.stop();
  return;
}

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesNodes

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Requests a node id that was never registered and verifies the nodes
 * resource answers 404 NOT_FOUND with the expected RemoteException fields
 * (delegated to verifyNonexistNodeException).
 */
@Test public void testNonexistNode() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  WebResource webResource = resource();
  try {
    webResource.path("ws").path("v1").path("cluster").path("nodes")
        .path("node_invalid:99")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on non-existent nodeid");
  } catch (UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject body = errorResponse.getEntity(JSONObject.class);
    JSONObject remoteException = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteException.length());
    String message = remoteException.getString("message");
    String type = remoteException.getString("exception");
    String classname = remoteException.getString("javaClassName");
    verifyNonexistNodeException(message, type, classname);
  } finally {
    rm.stop();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Querying nodes with states=BOGUSSTATE must return 400 BAD_REQUEST whose JSON
// RemoteException is an IllegalArgumentException mentioning the NodeState enum
// constant that failed to parse. RM is stopped in finally.
@Test public void testNodesQueryStateInvalid() throws JSONException, Exception { WebResource r=resource(); rm.registerNode("h1:1234",5120); rm.registerNode("h2:1235",5121); try { r.path("ws").path("v1").path("cluster").path("nodes").queryParam("states","BOGUSSTATE").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception querying invalid state"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringContains("exception message","org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",message); WebServicesTestUtils.checkStringMatch("exception type","IllegalArgumentException",type); WebServicesTestUtils.checkStringMatch("exception classname","java.lang.IllegalArgumentException",classname); } finally { rm.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Same unknown-node 404 check as testNonexistNode but requesting XML: the
// RemoteException element is parsed with a DOM DocumentBuilder and its
// message/exception/javaClassName children are validated via
// verifyNonexistNodeException. RM is stopped in finally.
@Test public void testNonexistNodeXML() throws JSONException, Exception { rm.registerNode("h1:1234",5120); rm.registerNode("h2:1235",5121); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("nodes").path("node_invalid:99").accept(MediaType.APPLICATION_XML).get(JSONObject.class); fail("should have thrown exception on non-existent nodeid"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String msg=response.getEntity(String.class); System.out.println(msg); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(msg)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("RemoteException"); Element element=(Element)nodes.item(0); String message=WebServicesTestUtils.getXmlString(element,"message"); String type=WebServicesTestUtils.getXmlString(element,"exception"); String classname=WebServicesTestUtils.getXmlString(element,"javaClassName"); verifyNonexistNodeException(message,type,classname); } finally { rm.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// A syntactically malformed node id (no host:port separator) must yield
// 400 BAD_REQUEST with an IllegalArgumentException RemoteException whose
// message names the invalid NodeId. RM is stopped in finally.
@Test public void testInvalidNode() throws JSONException, Exception { rm.registerNode("h1:1234",5120); rm.registerNode("h2:1235",5121); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("nodes").path("node_invalid_foo").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on non-existent nodeid"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","Invalid NodeId \\[node_invalid_foo\\]. Expected host:port",message); WebServicesTestUtils.checkStringMatch("exception type","IllegalArgumentException",type); WebServicesTestUtils.checkStringMatch("exception classname","java.lang.IllegalArgumentException",classname); } finally { rm.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Unknown node id with NO Accept header: the service must default to JSON and
// still produce the standard 404 RemoteException validated by
// verifyNonexistNodeException. RM is stopped in finally.
@Test public void testNonexistNodeDefault() throws JSONException, Exception { rm.registerNode("h1:1234",5120); rm.registerNode("h2:1235",5121); WebResource r=resource(); try { r.path("ws").path("v1").path("cluster").path("nodes").path("node_invalid:99").get(JSONObject.class); fail("should have thrown exception on non-existent nodeid"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); verifyNonexistNodeException(message,type,classname); } finally { rm.stop(); } }

Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebappAuthentication

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// Starts the RM web app and checks the cluster page answers HTTP 200, then
// branches on whether Kerberos security is enabled to exercise the matching
// anonymous-user scenario. NOTE(review): the hard-coded port 8088 assumes the
// default RM web address -- confirm against the test's setup fixture.
@Test public void testSimpleAuth() throws Exception { rm.start(); URL url=new URL("http://localhost:8088/cluster"); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); try { conn.getInputStream(); assertEquals(Status.OK.getStatusCode(),conn.getResponseCode()); } catch ( Exception e) { fail("Fetching url failed"); } if (UserGroupInformation.isSecurityEnabled()) { testAnonymousKerberosUser(); } else { testAnonymousSimpleUser(); } rm.stop(); }

Class: org.apache.hadoop.yarn.server.timeline.TestLeveldbTimelineStore

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The leveldb timeline store's root directory must exist on the local
 * filesystem and carry the store's prescribed umask permission.
 */
@Test
public void testRootDirPermission() throws IOException {
  FileSystem localFs = FileSystem.getLocal(new YarnConfiguration());
  Path dbPath = new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME);
  FileStatus status = localFs.getFileStatus(dbPath);
  assertNotNull(status);
  assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, status.getPermission());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Version-compatibility contract of the leveldb store: a stored version with a
// higher MINOR number is accepted on restart (and reset to the default current
// version), while a higher MAJOR number makes restart fail with a
// ServiceStateException mentioning "Incompatible version for timeline store".
// The store/restart ordering below is load-bearing; do not reorder.
@Test public void testCheckVersion() throws IOException { LeveldbTimelineStore dbStore=(LeveldbTimelineStore)store; Version defaultVersion=dbStore.getCurrentVersion(); Assert.assertEquals(defaultVersion,dbStore.loadVersion()); Version compatibleVersion=Version.newInstance(defaultVersion.getMajorVersion(),defaultVersion.getMinorVersion() + 2); dbStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion,dbStore.loadVersion()); restartTimelineStore(); dbStore=(LeveldbTimelineStore)store; Assert.assertEquals(defaultVersion,dbStore.loadVersion()); Version incompatibleVersion=Version.newInstance(defaultVersion.getMajorVersion() + 1,defaultVersion.getMinorVersion()); dbStore.storeVersion(incompatibleVersion); try { restartTimelineStore(); Assert.fail("Incompatible version, should expect fail here."); } catch ( ServiceStateException e) { Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for timeline store")); } }

Class: org.apache.hadoop.yarn.server.timeline.security.TestTimelineACLsManager

UtilityVerifier BooleanVerifier HybridVerifier 
/**
 * An entity carrying no owner information must make checkAccess fail with a
 * YarnException whose message reports the entity "is corrupted.".
 */
@Test
public void testCorruptedOwnerInfo() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, "owner");
  TimelineACLsManager aclsManager = new TimelineACLsManager(conf);
  // Deliberately leave the owner unset to simulate corrupted owner info.
  TimelineEntity corrupted = new TimelineEntity();
  try {
    aclsManager.checkAccess(
        UserGroupInformation.createRemoteUser("owner"), corrupted);
    Assert.fail("Exception is expected");
  } catch (YarnException e) {
    boolean reportsCorruption = e.getMessage().contains("is corrupted.");
    Assert.assertTrue("It's not the exact expected exception", reportsCorruption);
  }
}

Class: org.apache.hadoop.yarn.server.timeline.webapp.TestCrossOriginFilterInitializer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getFilterParameters must keep every configuration property under the
 * initializer's PREFIX (prefix included in the returned key) and drop
 * properties outside that namespace.
 */
@Test
public void testGetFilterParameters() {
  Configuration conf = new Configuration();
  String rootKey = CrossOriginFilterInitializer.PREFIX + "rootparam";
  String nestedKey = CrossOriginFilterInitializer.PREFIX + "nested.param";
  conf.set(rootKey, "rootvalue");
  conf.set(nestedKey, "nestedvalue");
  conf.set("outofscopeparam", "outofscopevalue");
  Map filterParameters = CrossOriginFilterInitializer.getFilterParameters(conf);
  Assert.assertEquals("Could not find filter parameter", "rootvalue",
      filterParameters.get(rootKey));
  Assert.assertEquals("Could not find filter parameter", "nestedvalue",
      filterParameters.get(nestedKey));
  // Properties outside the PREFIX namespace must not leak into the result.
  Assert.assertNull("Found unexpected value in filter parameters",
      filterParameters.get("outofscopeparam"));
}

Class: org.apache.hadoop.yarn.server.timeline.webapp.TestTimelineWebServices

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * GET /ws/v1/timeline/type_1/events?entityId=id_1 must return the two stored
 * events for that single entity, newest first: end_event (timestamp 456, one
 * info entry) before start_event (timestamp 123, no info entries).
 */
@Test
public void testGetEvents() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .path("type_1").path("events")
      .queryParam("entityId", "id_1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  TimelineEvents events = response.getEntity(TimelineEvents.class);
  Assert.assertNotNull(events);
  Assert.assertEquals(1, events.getAllEvents().size());
  TimelineEvents.EventsOfOneEntity partEvents = events.getAllEvents().get(0);
  Assert.assertEquals(2, partEvents.getEvents().size());
  TimelineEvent event1 = partEvents.getEvents().get(0);
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  Assert.assertEquals(456L, event1.getTimestamp());
  Assert.assertEquals("end_event", event1.getEventType());
  Assert.assertEquals(1, event1.getEventInfo().size());
  TimelineEvent event2 = partEvents.getEvents().get(1);
  Assert.assertEquals(123L, event2.getTimestamp());
  Assert.assertEquals("start_event", event2.getEventType());
  Assert.assertEquals(0, event2.getEventInfo().size());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// With YARN ACLs on, the original poster ("tester") may re-post an entity with
// zero errors, while a different user ("other") gets exactly one
// ACCESS_DENIED error back. The admin ACLs manager swap is undone in finally
// so later tests see the original manager.
@Test public void testPostEntitiesWithYarnACLsEnabled() throws Exception { AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 2"); entity.setEntityType("test type 2"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); TimelinePutResponse putResponse=response.getEntity(TimelinePutResponse.class); Assert.assertNotNull(putResponse); Assert.assertEquals(0,putResponse.getErrors().size()); response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); putResponse=response.getEntity(TimelinePutResponse.class); Assert.assertNotNull(putResponse); Assert.assertEquals(1,putResponse.getErrors().size()); Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,putResponse.getErrors().get(0).getErrorCode()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Fetching an entity with fields=events,otherinfo must return its events and
 * other-info maps while omitting primary filters (not requested).
 */
@Test
public void testGetEntityFields1() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .path("type_1").path("id_1")
      .queryParam("fields", "events,otherinfo")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  TimelineEntity entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("id_1", entity.getEntityId());
  Assert.assertEquals("type_1", entity.getEntityType());
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  Assert.assertEquals(123L, entity.getStartTime().longValue());
  Assert.assertEquals(2, entity.getEvents().size());
  // "primaryfilters" was not in the fields list, so none come back.
  Assert.assertEquals(0, entity.getPrimaryFilters().size());
  Assert.assertEquals(4, entity.getOtherInfo().size());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testGetEntityWithYarnACLsEnabled() throws Exception { AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 3"); entity.setEntityType("test type 3"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","relatedentities").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","primaryfilters").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 
3").path("test id 3").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); assertEquals(ClientResponse.Status.NOT_FOUND,response.getClientResponseStatus()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Fetching an entity with fields=lasteventonly,primaryfilters,relatedentities
 * must return only the most recent event plus the primary filters, and no
 * other-info (not requested).
 */
@Test
public void testGetEntityFields2() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .path("type_1").path("id_1")
      .queryParam("fields", "lasteventonly," + "primaryfilters,relatedentities")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  TimelineEntity entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("id_1", entity.getEntityId());
  Assert.assertEquals("type_1", entity.getEntityType());
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  Assert.assertEquals(123L, entity.getStartTime().longValue());
  // "lasteventonly" limits the result to the single most recent event.
  Assert.assertEquals(1, entity.getEvents().size());
  Assert.assertEquals(4, entity.getPrimaryFilters().size());
  Assert.assertEquals(0, entity.getOtherInfo().size());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * GET /ws/v1/timeline must answer with a JSON AboutInfo payload whose
 * description reads "Timeline API".
 */
@Test
public void testAbout() throws Exception {
  WebResource root = resource();
  ClientResponse resp = root.path("ws").path("v1").path("timeline")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  TimelineWebServices.AboutInfo info =
      resp.getEntity(TimelineWebServices.AboutInfo.class);
  Assert.assertNotNull(info);
  Assert.assertEquals("Timeline API", info.getAbout());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Anonymous POSTs to the timeline service are rejected with FORBIDDEN; an
 * authenticated POST (user.name=tester) succeeds with zero put errors and the
 * entity can then be read back by id/type.
 */
@Test
public void testPostEntities() throws Exception {
  TimelineEntities entities = new TimelineEntities();
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityId("test id 1");
  entity.setEntityType("test type 1");
  entity.setStartTime(System.currentTimeMillis());
  entities.addEntity(entity);
  WebResource r = resource();
  // No user.name query param: the request must be refused.
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .accept(MediaType.APPLICATION_JSON)
      .type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  assertEquals(ClientResponse.Status.FORBIDDEN,
      response.getClientResponseStatus());
  response = r.path("ws").path("v1").path("timeline")
      .queryParam("user.name", "tester")
      .accept(MediaType.APPLICATION_JSON)
      .type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  // Local renamed from the misspelled "putResposne".
  TimelinePutResponse putResponse =
      response.getEntity(TimelinePutResponse.class);
  Assert.assertNotNull(putResponse);
  Assert.assertEquals(0, putResponse.getErrors().size());
  // The stored entity must be retrievable afterwards.
  response = r.path("ws").path("v1").path("timeline")
      .path("test type 1").path("test id 1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("test id 1", entity.getEntityId());
  Assert.assertEquals("test type 1", entity.getEntityType());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Fetching an entity with no fields filter must return the full view: both
 * events, all four primary filters, and all four other-info entries.
 */
@Test
public void testGetEntity() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .path("type_1").path("id_1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  TimelineEntity entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("id_1", entity.getEntityId());
  Assert.assertEquals("type_1", entity.getEntityType());
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  Assert.assertEquals(123L, entity.getStartTime().longValue());
  Assert.assertEquals(2, entity.getEvents().size());
  Assert.assertEquals(4, entity.getPrimaryFilters().size());
  Assert.assertEquals(4, entity.getOtherInfo().size());
}

Class: org.apache.hadoop.yarn.server.timeline.webapp.TestTimelineWebServicesWithSSL

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// End-to-end put over SSL: the client publishes one entity with one event,
// the response has no errors, the recorded client response URL is https, and
// the store returns an entity matching the one published. Client lifecycle is
// torn down in finally.
@Test public void testPutEntities() throws Exception { TestTimelineClient client=new TestTimelineClient(); try { client.init(conf); client.start(); TimelineEntity expectedEntity=new TimelineEntity(); expectedEntity.setEntityType("test entity type"); expectedEntity.setEntityId("test entity id"); TimelineEvent event=new TimelineEvent(); event.setEventType("test event type"); event.setTimestamp(0L); expectedEntity.addEvent(event); TimelinePutResponse response=client.putEntities(expectedEntity); Assert.assertEquals(0,response.getErrors().size()); Assert.assertTrue(client.resp.toString().contains("https")); TimelineEntity actualEntity=store.getEntity(expectedEntity.getEntityId(),expectedEntity.getEntityType(),EnumSet.allOf(Field.class)); Assert.assertNotNull(actualEntity); Assert.assertEquals(expectedEntity.getEntityId(),actualEntity.getEntityId()); Assert.assertEquals(expectedEntity.getEntityType(),actualEntity.getEntityType()); } finally { client.stop(); client.close(); } }

Class: org.apache.hadoop.yarn.server.utils.TestLeveldbIterator

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier 
/**
 * Every LeveldbIterator method listed in RTEXC_METHODS, when the underlying
 * DBIterator throws a RuntimeException, must surface it wrapped exactly once
 * as a DBException (never double-wrapped); close() must translate the failure
 * into an IOException.
 */
@Test
public void testExceptionHandling() throws Exception {
  // Dynamic proxy whose every invocation fails with a RuntimeException.
  InvocationHandler rtExcHandler = new InvocationHandler() {
    @Override
    public Object invoke(Object proxy, Method method, Object[] args)
        throws Throwable {
      throw new RuntimeException("forced runtime error");
    }
  };
  DBIterator dbiter = (DBIterator) Proxy.newProxyInstance(
      DBIterator.class.getClassLoader(),
      new Class[] { DBIterator.class }, rtExcHandler);
  LeveldbIterator iter = new LeveldbIterator(dbiter);
  for (CallInfo ci : RTEXC_METHODS) {
    Method method = iter.getClass().getMethod(ci.methodName, ci.argTypes);
    assertNotNull("unable to locate method " + ci.methodName, method);
    try {
      method.invoke(iter, ci.args);
      fail("operation should have thrown");
    } catch (InvocationTargetException ite) {
      Throwable exc = ite.getTargetException();
      assertTrue("Method " + ci.methodName + " threw non-DBException: " + exc,
          exc instanceof DBException);
      assertFalse("Method " + ci.methodName + " double-wrapped DBException",
          exc.getCause() instanceof DBException);
    }
  }
  try {
    iter.close();
    // Fixed typo in the failure message ("shoul" -> "should").
    fail("operation should have thrown");
  } catch (IOException e) {
    // expected: close() wraps the underlying failure in an IOException
  }
}

Class: org.apache.hadoop.yarn.server.webproxy.TestWebAppProxyServlet

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Drives the web-app proxy through its main response matrix by toggling
// AppReportFetcherForTest.answer between requests: bad app path -> 500;
// known app with the "checked" cookie -> 200 and cookie echoed; answer=1 and
// answer=4 -> 404 without the cookie; answer=2 -> 200 with the interstitial
// warning page; answer=3 -> 200. The proxy server is closed in finally.
// NOTE(review): the source line break inside "String s=readInputStream(...)"
// is extraction damage but remains legal Java; kept verbatim here.
@Test(timeout=5000) public void testWebAppProxyServlet() throws Exception { Configuration configuration=new Configuration(); configuration.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9090"); configuration.setInt("hadoop.http.max.threads",5); WebAppProxyServerForTest proxy=new WebAppProxyServerForTest(); proxy.init(configuration); proxy.start(); int proxyPort=proxy.proxy.proxyServer.getConnectorAddress(0).getPort(); AppReportFetcherForTest appReportFetcher=proxy.proxy.appReportFetcher; try { URL wrongUrl=new URL("http://localhost:" + proxyPort + "/proxy/app"); HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection(); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode()); URL url=new URL("http://localhost:" + proxyPort + "/proxy/application_00_0"); proxyConn=(HttpURLConnection)url.openConnection(); proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true"); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode()); assertTrue(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true")); appReportFetcher.answer=1; proxyConn=(HttpURLConnection)url.openConnection(); proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true"); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode()); assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true")); appReportFetcher.answer=4; proxyConn=(HttpURLConnection)url.openConnection(); proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true"); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode()); assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true")); appReportFetcher.answer=2; proxyConn=(HttpURLConnection)url.openConnection(); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode()); String 
s=readInputStream(proxyConn.getInputStream()); assertTrue(s.contains("to continue to an Application Master web interface owned by")); assertTrue(s.contains("WARNING: The following page may not be safe!")); appReportFetcher.answer=3; proxyConn=(HttpURLConnection)url.openConnection(); proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true"); proxyConn.connect(); assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode()); } finally { proxy.close(); } }

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
// Starts WebAppProxyServer via its startServer entry point on localhost:9099,
// polls up to 20 times (100 ms apart) until a connection succeeds, then
// asserts a bad /proxy/app request returns HTTP 500. The server is stopped in
// finally. Setting counter=0 inside the try is the loop-exit-on-success idiom.
/** * Test main method of WebAppProxyServer */ @Test(timeout=5000) public void testWebAppProxyServerMainMethod() throws Exception { WebAppProxyServer mainServer=null; Configuration conf=new YarnConfiguration(); conf.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9099"); try { mainServer=WebAppProxyServer.startServer(conf); int counter=20; URL wrongUrl=new URL("http://localhost:9099/proxy/app"); HttpURLConnection proxyConn=null; while (counter > 0) { counter--; try { proxyConn=(HttpURLConnection)wrongUrl.openConnection(); proxyConn.connect(); proxyConn.getResponseCode(); counter=0; } catch ( Exception e) { Thread.sleep(100); } } assertNotNull(proxyConn); assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode()); } finally { if (mainServer != null) { mainServer.stop(); } } }

Class: org.apache.hadoop.yarn.server.webproxy.amfilter.TestAmFilter

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises AmIpFilter end to end with Mockito-stubbed requests: a non-HTTP
// ServletRequest is rejected with ServletException; an unknown remote address
// is redirected to the proxy URI; a proxy-listed address ("127.0.0.1") passes
// through as a plain HttpServletRequest; and once the proxy user cookie is
// present the request is wrapped in AmIpServletRequestWrapper exposing the
// cookie user as principal/remote user. Stubbing order is load-bearing.
/** * Test AmIpFilter */ @Test(timeout=1000) @SuppressWarnings("deprecation") public void testFilter() throws Exception { Map params=new HashMap(); params.put(AmIpFilter.PROXY_HOST,proxyHost); params.put(AmIpFilter.PROXY_URI_BASE,proxyUri); FilterConfig config=new DummyFilterConfig(params); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { doFilterRequest=servletRequest.getClass().getName(); if (servletRequest instanceof AmIpServletRequestWrapper) { servletWrapper=(AmIpServletRequestWrapper)servletRequest; } } } ; AmIpFilter testFilter=new AmIpFilter(); testFilter.init(config); HttpServletResponseForTest response=new HttpServletResponseForTest(); ServletRequest failRequest=Mockito.mock(ServletRequest.class); try { testFilter.doFilter(failRequest,response,chain); fail(); } catch ( ServletException e) { assertEquals("This filter only works for HTTP/HTTPS",e.getMessage()); } HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteAddr()).thenReturn("redirect"); Mockito.when(request.getRequestURI()).thenReturn("/redirect"); testFilter.doFilter(request,response,chain); assertEquals("http://bogus/redirect",response.getRedirect()); Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1"); testFilter.doFilter(request,response,chain); assertTrue(doFilterRequest.contains("javax.servlet.http.HttpServletRequest")); Cookie[] cookies=new Cookie[1]; cookies[0]=new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME,"user"); Mockito.when(request.getCookies()).thenReturn(cookies); testFilter.doFilter(request,response,chain); assertEquals("org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper",doFilterRequest); assertEquals("user",servletWrapper.getUserPrincipal().getName()); assertEquals("user",servletWrapper.getRemoteUser()); assertFalse(servletWrapper.isUserInRole("")); }

Class: org.apache.hadoop.yarn.server.webproxy.amfilter.TestAmFilterInitializer

NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Covers the four AmFilterInitializer address-resolution cases: explicit
// PROXY_ADDRESS, fallback to RM_WEBAPP_ADDRESS, HA with three RM ids
// (comma-delimited hosts/URI bases, sorted before comparison because order is
// unspecified), and HA over HTTPS using RM_WEBAPP_HTTPS_ADDRESS with https://
// URI bases. Each case uses a fresh MockFilterContainer/Configuration pair and
// expects exactly two filter parameters (PROXY_HOSTS, PROXY_URI_BASES).
// NOTE(review): the mid-statement line break below is extraction damage but
// remains legal Java; kept verbatim.
@Test public void testInitFilter(){ MockFilterContainer con=new MockFilterContainer(); Configuration conf=new Configuration(false); conf.set(YarnConfiguration.PROXY_ADDRESS,"host1:1000"); AmFilterInitializer afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); assertEquals("host1",con.givenParameters.get(AmIpFilter.PROXY_HOSTS)); assertEquals("http://host1:1000/foo",con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)); con=new MockFilterContainer(); conf=new Configuration(false); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"host2:2000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); assertEquals("host2",con.givenParameters.get(AmIpFilter.PROXY_HOSTS)); assertEquals("http://host2:2000/foo",con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)); con=new MockFilterContainer(); conf=new Configuration(false); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); String[] proxyHosts=con.givenParameters.get(AmIpFilter.PROXY_HOSTS).split(AmIpFilter.PROXY_HOSTS_DELIMITER); assertEquals(3,proxyHosts.length); Arrays.sort(proxyHosts); assertEquals("host2",proxyHosts[0]); assertEquals("host3",proxyHosts[1]); assertEquals("host4",proxyHosts[2]); String[] proxyBases=con.givenParameters.get(AmIpFilter.PROXY_URI_BASES).split(AmIpFilter.PROXY_URI_BASES_DELIMITER); assertEquals(3,proxyBases.length); Arrays.sort(proxyBases); assertEquals("http://host2:2000/foo",proxyBases[0]); 
assertEquals("http://host3:3000/foo",proxyBases[1]); assertEquals("http://host4:4000/foo",proxyBases[2]); con=new MockFilterContainer(); conf=new Configuration(false); conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,HttpConfig.Policy.HTTPS_ONLY.toString()); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); proxyHosts=con.givenParameters.get(AmIpFilter.PROXY_HOSTS).split(AmIpFilter.PROXY_HOSTS_DELIMITER); assertEquals(2,proxyHosts.length); Arrays.sort(proxyHosts); assertEquals("host5",proxyHosts[0]); assertEquals("host6",proxyHosts[1]); proxyBases=con.givenParameters.get(AmIpFilter.PROXY_URI_BASES).split(AmIpFilter.PROXY_URI_BASES_DELIMITER); assertEquals(2,proxyBases.length); Arrays.sort(proxyBases); assertEquals("https://host5:5000/foo",proxyBases[0]); assertEquals("https://host6:6000/foo",proxyBases[1]); }

Class: org.apache.hadoop.yarn.sls.appmaster.TestAMSimulator

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Initializes a mock AM simulator against the test RM, runs its first step,
 * and verifies exactly one app is registered under the simulator's app id
 * before running the final step.
 */
@Test
public void testAMSimulator() throws Exception {
  MockAMSimulator app = new MockAMSimulator();
  // NOTE(review): generic parameters appear stripped from the damaged source;
  // raw List kept to avoid guessing the element type.
  List containers = new ArrayList();
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  app.init(1, 1000, containers, rm, null, 0, 1000000L, "user1", "default",
      false, "app1");
  app.firstStep();
  Assert.assertEquals(1, rm.getRMContext().getRMApps().size());
  Assert.assertNotNull(rm.getRMContext().getRMApps().get(app.appId));
  app.lastStep();
}

Class: org.apache.hadoop.yarn.sls.nodemanager.TestNMSimulator

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Registers one simulated NM (10 GB / 10 vcores) with the RM, then checks the
 * container lifecycle: a normal container (positive lifetime) lands in the
 * running set, an AM container (lifetime -1) in the AM set, and cleanup moves
 * each to completed / out of the AM set respectively.
 */
@Test
public void testNMSimulator() throws Exception {
  NMSimulator node1 = new NMSimulator();
  node1.init("rack1/node1", GB * 10, 10, 0, 1000, rm);
  node1.middleStep();
  Assert.assertEquals(1, rm.getResourceScheduler().getNumClusterNodes());
  Assert.assertEquals(GB * 10,
      rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
  Assert.assertEquals(10,
      rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores());
  ContainerId cId1 = newContainerId(1, 1, 1);
  Container container1 = Container.newInstance(cId1, null, null,
      Resources.createResource(GB, 1), null, null);
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  node1.addNewContainer(container1, 100000L);
  Assert.assertTrue("Node1 should have one running container.",
      node1.getRunningContainers().containsKey(cId1));
  ContainerId cId2 = newContainerId(2, 1, 1);
  Container container2 = Container.newInstance(cId2, null, null,
      Resources.createResource(GB, 1), null, null);
  // A negative lifetime marks the container as the AM container.
  node1.addNewContainer(container2, -1L);
  Assert.assertTrue("Node1 should have one running AM container",
      node1.getAMContainers().contains(cId2));
  node1.cleanupContainer(cId1);
  Assert.assertTrue("Container1 should be removed from Node1.",
      node1.getCompletedContainers().contains(cId1));
  node1.cleanupContainer(cId2);
  Assert.assertFalse("Container2 should be removed from Node1.",
      node1.getAMContainers().contains(cId2));
}

Class: org.apache.hadoop.yarn.sls.scheduler.TestTaskRunner

BooleanVerifier EqualityVerifier HybridVerifier 
/** Schedules a repeating task and checks that first/middle/last steps all ran. */
@Test
public void testMultiTask() throws Exception {
  runner.start();
  runner.schedule(new MultiTask(0, 20, 5));
  // Bounded wait for the task to run to completion.
  MultiTask.latch.await(5000, TimeUnit.MILLISECONDS);
  Assert.assertTrue(MultiTask.first);
  // Middle invocations = number of intervals minus the first and last steps.
  Assert.assertEquals((20 - 0) / 5 - 2 + 1, MultiTask.middle);
  Assert.assertTrue(MultiTask.last);
}

Class: org.apache.hadoop.yarn.util.TestFSDownload

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test(timeout=10000) public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); FileContext files=FileContext.getLocalFSFileContext(conf); final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName())); files.mkdir(basedir,null,true); conf.setStrings(TestFSDownload.class.getName(),basedir.toString()); Map rsrcVis=new HashMap(); Random rand=new Random(); long sharedSeed=rand.nextLong(); rand.setSeed(sharedSeed); System.out.println("SEED: " + sharedSeed); Map> pending=new HashMap>(); ExecutorService exec=Executors.newSingleThreadExecutor(); LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName()); int size=512; LocalResourceVisibility vis=LocalResourceVisibility.PUBLIC; Path path=new Path(basedir,"test-file"); LocalResource rsrc=createFile(files,path,size,rand,vis); rsrcVis.put(rsrc,vis); Path destPath=dirs.getLocalPathForWrite(basedir.toString(),size,conf); destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet())); FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc); pending.put(rsrc,exec.submit(fsd)); exec.shutdown(); while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ; Assert.assertTrue(pending.get(rsrc).isDone()); try { for ( Map.Entry> p : pending.entrySet()) { p.getValue().get(); Assert.fail("We localized a file that is not public."); } } catch ( ExecutionException e) { Assert.assertTrue(e.getCause() instanceof IOException); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that FSDownload localizes into the unique destination directory
 * it was handed: the localized file's parent must equal destPath.
 *
 * Fix: restores the {@code Future<Path>} type parameter stripped during
 * extraction — a raw {@code Future} makes {@code rPath.get().getParent()}
 * uncompilable because {@code get()} would return {@code Object}.
 */
@Test(timeout=10000)
public void testUniqueDestinationPath() throws Exception {
  Configuration conf = new Configuration();
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

  ExecutorService singleThreadedExec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs =
      new LocalDirAllocator(TestFSDownload.class.getName());
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));

  Path p = new Path(basedir, "dir" + 0 + ".jar");
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  LocalResource rsrc = createJar(files, p, vis);
  FSDownload fsd = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
  Future<Path> rPath = singleThreadedExec.submit(fsd);
  singleThreadedExec.shutdown();
  while (!singleThreadedExec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(rPath.isDone());
  // The localized path must live directly under the requested destination.
  Assert.assertEquals(destPath, rPath.get().getParent());
}

BooleanVerifier AssumptionSetter IdentityVerifier HybridVerifier 
@Test(timeout=60000) public void testDownloadPublicWithStatCache() throws IOException, URISyntaxException, InterruptedException, ExecutionException { final Configuration conf=new Configuration(); FileContext files=FileContext.getLocalFSFileContext(conf); Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName())); FileSystem f=basedir.getFileSystem(conf); assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f,basedir,null)); files.mkdir(basedir,null,true); conf.setStrings(TestFSDownload.class.getName(),basedir.toString()); int size=512; final ConcurrentMap counts=new ConcurrentHashMap(); final CacheLoader> loader=FSDownload.createStatusCacheLoader(conf); final LoadingCache> statCache=CacheBuilder.newBuilder().build(new CacheLoader>(){ public Future load( Path path) throws Exception { AtomicInteger count=counts.get(path); if (count == null) { count=new AtomicInteger(0); AtomicInteger existing=counts.putIfAbsent(path,count); if (existing != null) { count=existing; } } count.incrementAndGet(); return loader.load(path); } } ); final int fileCount=3; List> tasks=new ArrayList>(); for (int i=0; i < fileCount; i++) { Random rand=new Random(); long sharedSeed=rand.nextLong(); rand.setSeed(sharedSeed); System.out.println("SEED: " + sharedSeed); final Path path=new Path(basedir,"test-file-" + i); createFile(files,path,size,rand); final FileSystem fs=path.getFileSystem(conf); final FileStatus sStat=fs.getFileStatus(path); tasks.add(new Callable(){ public Boolean call() throws IOException { return FSDownload.isPublic(fs,path,sStat,statCache); } } ); } ExecutorService exec=Executors.newFixedThreadPool(fileCount); try { List> futures=exec.invokeAll(tasks); for ( Future future : futures) { assertTrue(future.get()); } for ( AtomicInteger count : counts.values()) { assertSame(count.get(),1); } } finally { exec.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=10000) public void testDownload() throws IOException, URISyntaxException, InterruptedException { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); FileContext files=FileContext.getLocalFSFileContext(conf); final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName())); files.mkdir(basedir,null,true); conf.setStrings(TestFSDownload.class.getName(),basedir.toString()); Map rsrcVis=new HashMap(); Random rand=new Random(); long sharedSeed=rand.nextLong(); rand.setSeed(sharedSeed); System.out.println("SEED: " + sharedSeed); Map> pending=new HashMap>(); ExecutorService exec=Executors.newSingleThreadExecutor(); LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName()); int[] sizes=new int[10]; for (int i=0; i < 10; ++i) { sizes[i]=rand.nextInt(512) + 512; LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE; if (i % 2 == 1) { vis=LocalResourceVisibility.APPLICATION; } Path p=new Path(basedir,"" + i); LocalResource rsrc=createFile(files,p,sizes[i],rand,vis); rsrcVis.put(rsrc,vis); Path destPath=dirs.getLocalPathForWrite(basedir.toString(),sizes[i],conf); destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet())); FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc); pending.put(rsrc,exec.submit(fsd)); } exec.shutdown(); while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ; for ( Future path : pending.values()) { Assert.assertTrue(path.isDone()); } try { for ( Map.Entry> p : pending.entrySet()) { Path localized=p.getValue().get(); assertEquals(sizes[Integer.valueOf(localized.getName())],p.getKey().getSize()); FileStatus status=files.getFileStatus(localized.getParent()); FsPermission perm=status.getPermission(); assertEquals("Cache directory permissions are incorrect",new FsPermission((short)0755),perm); status=files.getFileStatus(localized); perm=status.getPermission(); 
System.out.println("File permission " + perm + " for rsrc vis "+ p.getKey().getVisibility().name()); assert (rsrcVis.containsKey(p.getKey())); Assert.assertTrue("Private file should be 500",perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort()); } } catch ( ExecutionException e) { throw new IOException("Failed exec",e); } }

Class: org.apache.hadoop.yarn.util.TestProcfsBasedProcessTree

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test(timeout=30000) public void testProcessTree() throws Exception { try { Assert.assertTrue(ProcfsBasedProcessTree.isAvailable()); } catch ( Exception e) { LOG.info(StringUtils.stringifyException(e)); Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",false); return; } Random rm=new Random(); File tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_shellScript_" + rm.nextInt()+ ".sh"); tempFile.deleteOnExit(); shellScript=TEST_ROOT_DIR + File.separator + tempFile.getName(); tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_pidFile_" + rm.nextInt()+ ".pid"); tempFile.deleteOnExit(); pidFile=TEST_ROOT_DIR + File.separator + tempFile.getName(); lowestDescendant=TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile"; try { FileWriter fWriter=new FileWriter(shellScript); fWriter.write("# rogue task\n" + "sleep 1\n" + "echo hello\n"+ "if [ $1 -ne 0 ]\n"+ "then\n"+ " sh " + shellScript + " $(($1-1))\n"+ "else\n"+ " echo $$ > "+ lowestDescendant+ "\n"+ " while true\n do\n"+ " sleep 5\n"+ " done\n"+ "fi"); fWriter.close(); } catch ( IOException ioe) { LOG.info("Error: " + ioe); return; } Thread t=new RogueTaskThread(); t.start(); String pid=getRogueTaskPID(); LOG.info("Root process pid: " + pid); ProcfsBasedProcessTree p=createProcessTree(pid); p.updateProcessTree(); LOG.info("ProcessTree: " + p.toString()); File leaf=new File(lowestDescendant); while (!leaf.exists()) { try { Thread.sleep(500); } catch ( InterruptedException ie) { break; } } p.updateProcessTree(); LOG.info("ProcessTree: " + p.toString()); String processTreeDump=p.getProcessTreeDump(); destroyProcessTree(pid); boolean isAlive=true; for (int tries=100; tries > 0; tries--) { if (isSetsidAvailable()) { isAlive=isAnyProcessInTreeAlive(p); } else { isAlive=isAlive(pid); } if (!isAlive) { break; } Thread.sleep(100); } if (isAlive) { fail("ProcessTree shouldn't be alive"); } LOG.info("Process-tree dump follows: \n" + processTreeDump); Assert.assertTrue("Process-tree dump 
doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n")); for (int i=N; i >= 0; i--) { String cmdLineDump="\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" + " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " "+ i; Pattern pat=Pattern.compile(cmdLineDump); Matcher mat=pat.matcher(processTreeDump); Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i + "th process!",mat.find()); } try { t.join(2000); LOG.info("RogueTaskThread successfully joined."); } catch ( InterruptedException ie) { LOG.info("Interrupted while joining RogueTaskThread."); } p.updateProcessTree(); Assert.assertFalse("ProcessTree must have been gone",isAlive(pid)); Assert.assertTrue("Cumulative vmem for the gone-process is " + p.getCumulativeVmem() + " . It should be zero.",p.getCumulativeVmem() == 0); Assert.assertTrue(p.toString().equals("[ ]")); }

TestInitializer AssumptionSetter HybridVerifier 
/** Linux-only fixture: skips elsewhere, then clears the test root directory. */
@Before
public void setup() throws IOException {
  assumeTrue(Shell.LINUX);
  FileContext.getLocalFSFileContext()
      .delete(new Path(TEST_ROOT_DIR.getAbsolutePath()), true);
}

Class: org.apache.hadoop.yarn.util.TestResourceCalculatorProcessTree

NullVerifier ConditionMatcher HybridVerifier 
/** An explicitly requested implementation class yields an instance of that class. */
@Test
public void testCreateInstance() {
  ResourceCalculatorProcessTree tree =
      ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(
          "1", EmptyProcessTree.class, new Configuration());
  assertNotNull(tree);
  assertThat(tree, instanceOf(EmptyProcessTree.class));
}

APIUtilityVerifier NullVerifier ConditionMatcher HybridVerifier 
/** The created process tree must carry the exact Configuration it was given. */
@Test
public void testCreatedInstanceConfigured() {
  Configuration conf = new Configuration();
  ResourceCalculatorProcessTree tree =
      ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(
          "1", EmptyProcessTree.class, conf);
  assertNotNull(tree);
  assertThat(tree.getConf(), sameInstance(conf));
}

Class: org.apache.hadoop.yarn.util.TestYarnVersionInfo

BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test the yarn version info routines: every generated field must be
 * populated (not "Unknown" / not null) and the build version must embed
 * the source checksum.
 * @throws IOException
 */
@Test
public void versionInfoGenerated() throws IOException {
  assertTrue("getVersion returned Unknown",
      !YarnVersionInfo.getVersion().equals("Unknown"));
  assertTrue("getUser returned Unknown",
      !YarnVersionInfo.getUser().equals("Unknown"));
  assertTrue("getSrcChecksum returned Unknown",
      !YarnVersionInfo.getSrcChecksum().equals("Unknown"));
  assertNotNull("getUrl returned null", YarnVersionInfo.getUrl());
  assertNotNull("getRevision returned null", YarnVersionInfo.getRevision());
  assertNotNull("getBranch returned null", YarnVersionInfo.getBranch());
  assertTrue("getBuildVersion check doesn't contain: source checksum",
      YarnVersionInfo.getBuildVersion().contains("source checksum"));
}

Class: org.apache.hadoop.yarn.webapp.TestWebApp

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Starting a second web app on the same fixed numeric port must throw
 * WebAppException (the expected exception comes from the second start()).
 */
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class)
public void testCreateWithNonZeroPort() {
  WebApp app = WebApps.$for(this).at(50000).start();
  int boundPort = app.getListenerAddress().getPort();
  assertEquals(50000, boundPort);
  // The port is occupied, so this start() should throw.
  WebApp app2 = WebApps.$for(this).at(50000).start();
  app.stop();
  app2.stop();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An ephemeral port (0) binds somewhere positive, and that port is reusable after stop(). */
@Test
public void testCreateWithPort() {
  WebApp app = WebApps.$for(this).at(0).start();
  int boundPort = app.getListenerAddress().getPort();
  assertTrue(boundPort > 0);
  app.stop();
  // Re-bind to the same port once it has been released.
  app = WebApps.$for(this).at(boundPort).start();
  assertEquals(boundPort, app.getListenerAddress().getPort());
  app.stop();
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Registers custom controllers, parameterized routes and a JAX-RS web
 * service, then checks the response of each mapped URL.
 */
@Test
public void testCustomRoutes() throws Exception {
  WebApp app =
      WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {
        @Override
        public void setup() {
          bind(MyTestJAXBContextResolver.class);
          bind(MyTestWebService.class);
          route("/:foo", FooController.class);
          route("/bar/foo", FooController.class, "bar");
          route("/foo/:foo", DefaultController.class);
          route("/foo/bar/:foo", DefaultController.class, "index");
        }
      });
  String baseUrl = baseUrl(app);
  try {
    assertEquals("foo", getContent(baseUrl).trim());
    assertEquals("foo", getContent(baseUrl + "test").trim());
    assertEquals("foo1", getContent(baseUrl + "test/1").trim());
    assertEquals("bar", getContent(baseUrl + "test/bar/foo").trim());
    assertEquals("default", getContent(baseUrl + "test/foo/bar").trim());
    assertEquals("default1", getContent(baseUrl + "test/foo/1").trim());
    assertEquals("default2", getContent(baseUrl + "test/foo/bar/2").trim());
    assertEquals(404, getResponseCode(baseUrl + "test/goo"));
    assertEquals(200, getResponseCode(baseUrl + "ws/v1/test"));
    assertTrue(getContent(baseUrl + "ws/v1/test").contains("myInfo"));
  } finally {
    app.stop();
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** With an empty app name, the redirect path is "/" and a single "/*" spec is served. */
@Test
public void testServePathsNoName() {
  WebApp app = WebApps.$for("", this).start();
  assertEquals("/", app.getRedirectPath());
  String[] expectedPaths = {"/*"};
  String[] pathSpecs = app.getServePathSpecs();
  assertEquals(1, pathSpecs.length);
  for (String expected : expectedPaths) {
    assertTrue(ArrayUtils.contains(pathSpecs, expected));
  }
  app.stop();
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the YARN web app context: /static is not handled by the
 * controller, /logs 404s when the log dir is missing, and "/" routes to
 * FooController.
 */
@Test
public void testYARNWebAppContext() throws Exception {
  // Point the log dir at a path that does not exist.
  System.setProperty("hadoop.log.dir", "/Not/Existing/dir");
  WebApp app = WebApps.$for("test", this).start(new WebApp() {
    @Override
    public void setup() {
      route("/", FooController.class);
    }
  });
  String baseUrl = baseUrl(app);
  try {
    // static content must not be served by the "/" route
    assertFalse("foo".equals(getContent(baseUrl + "static").trim()));
    assertEquals(404, getResponseCode(baseUrl + "logs"));
    assertEquals("foo", getContent(baseUrl).trim());
  } finally {
    app.stop();
  }
}

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Starting a second web app on the same host:port bind address must throw
 * WebAppException (the expected exception comes from the second start()).
 */
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class)
public void testCreateWithBindAddressNonZeroPort() {
  WebApp app = WebApps.$for(this).at("0.0.0.0:50000").start();
  int boundPort = app.getListenerAddress().getPort();
  assertEquals(50000, boundPort);
  // The address is occupied, so this start() should throw.
  WebApp app2 = WebApps.$for(this).at("0.0.0.0:50000").start();
  app.stop();
  app2.stop();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** With a named app, both "/test" and "/test/*" path specs are served. */
@Test
public void testServePaths() {
  WebApp app = WebApps.$for("test", this).start();
  assertEquals("/test", app.getRedirectPath());
  String[] expectedPaths = {"/test", "/test/*"};
  String[] pathSpecs = app.getServePathSpecs();
  assertEquals(2, pathSpecs.length);
  for (String expected : expectedPaths) {
    assertTrue(ArrayUtils.contains(pathSpecs, expected));
  }
  app.stop();
}

Class: org.apache.hadoop.yarn.webapp.hamlet.TestParseSelector

NullVerifier EqualityVerifier HybridVerifier 
/** A selector with only a class part parses to a null id and the bare class name. */
@Test
public void testMissingId() {
  String[] parsed = parseSelector(".class");
  assertNull(parsed[S_ID]);
  assertEquals("class", parsed[S_CLASS]);
}

NullVerifier EqualityVerifier HybridVerifier 
/** A selector with only an id part parses to the bare id and a null class. */
@Test
public void testMissingClass() {
  String[] parsed = parseSelector("#id");
  assertEquals("id", parsed[S_ID]);
  assertNull(parsed[S_CLASS]);
}